text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
from lib.font import *
import sys
import fcntl
import termios
import struct
class progress_bar(object):
def __init__(self, tot=100, lenght=10):
self.cp='/-\\|'  # spinner characters; backslash doubled so it is not parsed as an escape sequence
self.bar_lenght = lenght
self.tot = tot
def startprogress(self, title):
"""Creates a progress bar 40 chars long on the console
and moves cursor back to beginning with BS character"""
sys.stdout.write(title + ": [" + "-" * self.bar_lenght + "]" + chr(8) * (self.bar_lenght+1))
sys.stdout.flush()
def progress(self, x):
"""Sets progress bar to a certain percentage x.
Progress is given as whole percentage, i.e. 50% done
is given by x = 50"""
y = int(x)%4
z = int((x/float(self.tot))*self.bar_lenght)
sys.stdout.write("#" * z + self.cp[y] +"-" * (self.bar_lenght-1 - z) + "] "+ bold(str(int(x))+"/"+str(self.tot)) + chr(8) * (self.bar_lenght+4+len(str(int(x)))+len(str(self.tot)) ))
sys.stdout.flush()
def endprogress(self):
"""End of progress bar;
Write full bar, then move to next line"""
sys.stdout.write("#" * self.bar_lenght + "]\n")
sys.stdout.flush()
class all_line_progress_bar(object):
def __init__(self):
self.COLS = struct.unpack('hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))[1]
def progress(self,current, total):
prefix = '%d / %d' % (current, total)
bar_start = ' ['
bar_end = '] '
bar_size = self.COLS - len(prefix + bar_start + bar_end)
amount = int(current / (total / float(bar_size)))
remain = bar_size - amount
bar = '#' * amount + ' ' * remain
return bold(prefix) + bar_start + bar + bar_end
def bar(self, current, total):
sys.stdout.write(self.progress(current,total) + '\r')
sys.stdout.flush()
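# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical driver for the progress_bar class above, assuming the
# lib.font package imported at the top (which provides bold()) is available.
if __name__ == '__main__':
    import time
    pb = progress_bar(tot=100, lenght=20)
    pb.startprogress("Working")
    for step in range(101):
        pb.progress(step)
        time.sleep(0.01)
    pb.endprogress()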
| luca-heltai/ePICURE | applications/lib/progress_bar.py | Python | gpl-2.0 | 1,682 | 0.030321 |
def _types_gen(T):
yield T
if hasattr(T, 't'):
for l in T.t:
yield l
if hasattr(l, 't'):
for ll in _types_gen(l):
yield ll
class Type(type):
""" A rudimentary extension to `type` that provides polymorphic
types for run-time type checking of JSON data types, e.g.:
assert type(u'') == String
assert type('') == String
assert type('') == Any
assert Any.kind('') == String
assert Any.decode('str') == String
assert Any.kind({}) == Object
"""
def __init__(self, *args, **kwargs):
type.__init__(self, *args, **kwargs)
def __eq__(self, other):
for T in _types_gen(self):
if isinstance(other, Type):
if T in other.t:
return True
if type.__eq__(T, other):
return True
return False
def __str__(self):
return getattr(self, '_name', 'unknown')
def N(self, n):
self._name = n
return self
def I(self, *args):
self.t = list(args)
return self
def kind(self, t):
if type(t) is Type:
return t
ty = lambda t: type(t)
if type(t) is type:
ty = lambda t: t
return reduce(
lambda L, R: R if (hasattr(R, 't') and ty(t) == R) else L,
filter(lambda T: T is not Any,
_types_gen(self)))
def decode(self, n):
return reduce(
lambda L, R: R if (str(R) == n) else L,
_types_gen(self))
# JSON primitives and data types
Object = Type('Object', (object,), {}).I(dict).N('obj')
Number = Type('Number', (object,), {}).I(int, long).N('num')
Boolean = Type('Boolean', (object,), {}).I(bool).N('bit')
String = Type('String', (object,), {}).I(str, unicode).N('str')
Array = Type('Array', (object,), {}).I(list, set, tuple).N('arr')
Nil = Type('Nil', (object,), {}).I(type(None)).N('nil')
Any = Type('Any', (object,), {}).I(
Object, Number, Boolean, String, Array, Nil).N('any')
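# --- Usage sketch (not part of the original file) ---
# A few illustrative checks mirroring the class docstring above. This module is
# Python 2 code (it relies on `unicode`, `long` and the builtin `reduce`), so
# the sketch uses the Python 2 print statement.
if __name__ == '__main__':
    assert type(u'hello') == String
    assert type(42) == Number
    assert type([1, 2, 3]) == Array
    assert type(None) == Nil
    assert Any.kind({}) == Object
    assert Any.decode('str') == String
    print 'all JSON type checks passed'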
| regmi/codenode-unr | codenode/external/jsonrpc/types.py | Python | bsd-3-clause | 1,860 | 0.013978 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import logging
import socket
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from sqoop import client, conf
from sqoop.client.exception import SqoopException
from decorators import get_job_or_exception
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
from django.views.decorators.cache import never_cache
__all__ = ['get_jobs', 'create_job', 'update_job', 'job', 'jobs', 'job_clone', 'job_delete', 'job_start', 'job_stop', 'job_status']
LOG = logging.getLogger(__name__)
@never_cache
def get_jobs(request):
response = {
'status': 0,
'errors': None,
'jobs': []
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
jobs = c.get_jobs()
response['jobs'] = list_to_dict(jobs)
except RestException, e:
response.update(handle_rest_exception(e, _('Could not get jobs.')))
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
def create_job(request):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
if 'job' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)
d = json.loads(request.POST['job'])
job = client.Job.from_dict(d)
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['job'] = c.create_job(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not create job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
def update_job(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
if 'job' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)
job.update_from_dict(json.loads(request.POST['job']))
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['job'] = c.update_job(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not update job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
def jobs(request):
if request.method == 'GET':
return get_jobs(request)
elif request.method == 'POST':
return create_job(request)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_job_or_exception()
def job(request, job):
response = {
'status': 0,
'errors': None,
'job': None
}
if request.method == 'GET':
response['job'] = job.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
elif request.method == 'POST':
return update_job(request, job)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_job_or_exception()
def job_clone(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
job.id = -1
job.name = '%s-copy' % job.name
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['job'] = c.create_job(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not clone job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
@get_job_or_exception()
def job_delete(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
c.delete_job(job)
except RestException, e:
response.update(handle_rest_exception(e, _('Could not delete job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
@get_job_or_exception()
def job_start(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['submission'] = c.start_job(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not start job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = [e.to_dict()]
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
@get_job_or_exception()
def job_stop(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['submission'] = c.stop_job(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not stop job.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
@never_cache
@get_job_or_exception()
def job_status(request, job):
if request.method != 'GET':
raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
response['submission'] = c.get_job_status(job).to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not get job status.')))
except SqoopException, e:
response['status'] = 100
response['errors'] = e.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
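# --- Response shape (illustrative note, not part of the original file) ---
# Every view above returns the same JSON envelope: 'status' is 0 on success and
# 100 when the Sqoop server reports a SqoopException, 'errors' carries the
# exception details, and the payload key ('job', 'jobs' or 'submission') holds
# the result. A successful create_job response might look roughly like:
#
#   {"status": 0, "errors": null, "job": {"id": 7, "name": "my-import"}}
#
# (the field values inside "job" are made up for illustration).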
| pwong-mapr/private-hue | apps/sqoop/src/sqoop/api/job.py | Python | apache-2.0 | 8,088 | 0.011375 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import sys
#################### <FUNCTIONS> ####################################################################
##<Get the vector C>###################################################################################################
def GetVectorC(eq):
C=[]
j=0
for i in range(len(eq)): #Recorre toda la cadena de la restriccion
if eq[i]=="+" or eq[i]=="-": #Indicador de que se ha encontrado un signo de adicion,_
#es decir acontinuacion hay un coeficiente
j=i
if eq[i]=="X" or eq[i]=="x": #Agrega al vector y el coeficiente que se encontro
C.append(float(eq[j:i]))
j=i
return C
##</Get the vector C>###################################################################################################
##<Find Zj-Cj>###################################################################################################
def GetZjCj(A,C,Cb):
ZC=[] #Inicializa un vector ZC que contendra los Zj - Cj
for i in range(len(C)): #para cada valor en el vector C (al que ya se agregaron las A's y las h's)
ZjCj=0 #inicializa una sumatoria
for j in range(len(A)): #Multiplica Cb por Yi y resta el Ci correspondiente
ZjCj=ZjCj + float(A[j][i]*Cb[j])
ZjCj=ZjCj-C[i]
ZC.append(ZjCj) #agrega el resultado y obtenemos nuestro Zj-Cj inicial
return ZC #regresa el vector Z-C
##</Find Zj-Cj>###################################################################################################
##<Find the vector Cb>###################################################################################################
def GetCB(C,ColBase):
Cb=[] #este vector contendra las posiciones
for NColBase in ColBase : #en el vector ColBase estan las posiciones de los ei.
for i in range(len(C)): #y para cada una de esas posiciones de la columna vamos a
if i==NColBase : #recorrer el vector C
Cb.append(C[i]) #si estamos en la posicion de e1, esa se agrega primero a CB y asi
return Cb #sucesivamente
##</Find the vector Cb>###################################################################################################
##<Creation of the matrix A>###################################################################################################
def MatrixA(restricciones,C,M,min_max):
A=[]
XB=[]
D=[]
##agrega a la matrix A los valores de los coeficientes de las retriscciones
for rest in restricciones :
y=[]
j=0
for i in range(len(rest)): #Recorre toda la cadena de la restriccion
if rest[i]=="+" or rest[i]=="-": #Indicador de que se ha encontrado un signo de adicion, es decir acontinuacion hay un coeficiente
j=i
if rest[i]=="X" or rest[i]=="x": #Agrega al vector y el coeficiente que se encontro
y.append(float(rest[j:i]))
j=i
if rest[i]=="<" or rest[i]==">" or rest[i]=="=" :
D.append(rest[i:i+2]) #agrega a D la restriccion para agregar las H, A
XB.append(float(rest[i+2:])) #Hace arreglo con los valores de las restricciones
break
if y != [] : A.append(y)
##Agrega a A los coeficientes de las variables de holgura
for i in range(len(D)): #Recorre las direcciones de las restricciones
if D[i] == "<=":
for j in range(len(D)): #Recorre las ecuaciones
if j ==i : #Si es la ecuacion correspondiente a la restriccion
A[j].append(1) #apendisa 1
C.append(0)
else :
A[j].append(0) #Otro caso apendiza 0
elif D[i] == ">=": #Análogo
for j in range(len(D)):
if j == i :
A[j].append(-1)
C.append(0)
else :
A[j].append(0)
##Agrega a A los coeficientes de las variables ARTIFICIALES
for i in range(len(D)): #Recorre las direcciones de las restricciones
if D[i] == "==":
for j in range(len(D)): #Recorre las ecuaciones
if j ==i : #Si es la ecuacion correspondiente a la restriccion
A[j].append(1) #apendisa 1
if min_max == "M" :
C.append( -1 * M )
else :
C.append( M )
else :
A[j].append(0) #Otro caso apendiza 0
elif D[i] == ">=": #Análogo
for j in range(len(D)):
if j == i :
A[j].append(1)
if min_max == "M" :
C.append( -1 * M )
else :
C.append( M )
else :
A[j].append(0)
return A, XB, C
##</Creation of the matrix A>######################################################
##<Print a matrix>###########################################################
def MatrixPrint(Matrix):
MatrixFormat=""
for i in range(len(Matrix)) :
for j in range(len(Matrix[i])) :
MatrixFormat = MatrixFormat + str(Matrix[i][j]) + ' '
MatrixFormat = MatrixFormat + "\n"
return MatrixFormat
##</Print a matrix>###########################################################
##<Print a vector>###########################################################
def VectorPrint(Matrix):
MatrixFormat="[ "
for i in range(len(Matrix)) :
if i == (len(Matrix) - 1) :
MatrixFormat = MatrixFormat + str(Matrix[i])
else :
MatrixFormat = MatrixFormat + str(Matrix[i]) + ', '
return MatrixFormat + " ]"
##</Print a vector>###########################################################
##<Identify the basis>############################################################
def Base(A):
B=[] #inicializa la matriz B. esta matriz contiene las ORDENADAS posiciones de la base canonica
CB=[] #Contiene las posiciones de la base ordenadas de derecha a izquierda
for j in range(len(A[1])-1,0,-1) : #Recorremos las columnas de la matriz a del final al inicio
Bt=[] #Bt contiene la columna i-esima de la matriz A
for i in range(len(A)) : #Asignacion de Bt
Bt.append(A[i][j])
if Bt.count(1) == 1 : #si en Bt solo se encuentra un 1, es decir Bt=ei se agrega suposicion a la lista CB
CB.append(j)
if len(CB)>=len(A) : #Condicion de parada, si de derecha a izquierda hay mas vectores ei que los que
break #la base canonica del problema contine sale deja de buscar vectores ei
for i in range(len(A)): #Recorre los renglores de A
for j in CB : #^solo en las columnas en las que se encontraron vectores ei
if A[i][j] == 1 : #y ordena para tener la matriz canonica
B.append(j)
return B
##</Identify the basis>############################################################
##<Identify the entering and leaving variables>############################################################
def EntradaSalida(A,ZC,XB,min_max) :
entrada = 0 #iniciamos la entrada, es decir el valor j en cero
salida = 0
if min_max == "M" :
for i in range(1,len(ZC)) : #recorre todo ZC empezando por la segunda posicion, variando i
if ZC[ i ] <= ZC[ entrada ] : #compara i con la posicion de entrada,para la primera vuelta ZC[1] con ZC[0]
entrada = i #si la posicion i es menor a la posicion anterior se reasigna la entrada
else :
for i in range(1,len(ZC)) : #recorre todo ZC empezando por la segunda posicion, variando i
if ZC[ i ] >= ZC[ entrada ] : #compara i con la posicion de entrada,para la primera vuelta ZC[1] con ZC[0]
entrada = i #si la posicion i es menor a la posicion anterior se reasigna
for j in range(len(A)) : #protege de dividir por cero
if A[ j ][ entrada ] > 0 :
salida = j
break
for j in range(1,len(A)) : #analógo pero con la divicion de XB/Yij, cuando se encuentra el menor se leasigna a
if A[ j ][ entrada ] > 0 : #Protege de dividir por cero
if XB[ j ]/A[ j ][ entrada ] <= XB[ salida ]/A[ salida ][ entrada ] :
salida = j
return entrada, salida
##</Identify the entering and leaving variables>############################################################
##<Compute the transformation equations>############################################################
def Ecuaciones_Trans(A,XB,ZC,entrada,salida) :
if wo == False :
print "Entra: " + str(entrada) + " Sale: " +str(salida) +"\nYij:"
else :
output.write("\n\nEntra: " + str(entrada) + " Sale: " +str(salida) +"\nYij:\n")
Yij=[]
##Calcular Y######
for i in range(len(A)) : #recorre TODA la matriz A, es decir todos los vectores Yij
SYij=[]
if i != salida : #cuando estamos en un renglon k != de r (de la salida) usamos la formula adecuada
for k in range(len(A[i])) :
SYij.append(A[i][k]-(A[salida][k]/A[salida][entrada])*A[i][entrada]) #se guarda el renglon entero en un vector
ygorro = "^y" + str(i) + str(k) + " = y" + str(i) + str(k) + " - y" + str(salida) + str(k) + "/y"+ str(salida) + str(entrada) + "*Y"+ str(i) + str(entrada) +" = "+ str(A[i][k]-(A[salida][k]/A[salida][entrada])*A[i][entrada])
if wo ==False :
print ygorro
else :
output.write(ygorro + "\n")
Yij.append(SYij) #todos los renglones se guardan en una matriz que luego será la nueva A
else :
for k in range(len(A[i])) : #analogamanete cuando k=r
varsyij = A[salida][k]/A[salida][entrada]
SYij.append(varsyij)
ygorro = "^y" + str(i) + str(k) + " = y" + str(salida) + str(k) + "/y"+ str(salida) + str(entrada) + " = " + str(varsyij)
if wo ==False :
print ygorro
else :
output.write(ygorro + "\n")
Yij.append(SYij) #todos los renglones se guardan en una matriz que luego será la nueva A
##Calcular Zj-Cj######
if wo == False :
print "\nZj-Cj:"
else :
output.write("\nZj-Cj:\n")
SZC=[]
for k in range(len(ZC)) : #calcula todas las Zj-Cj de acuerdo a la formula de cambio y lo guarda en SZC
varszc = ZC[k]-(A[salida][k]/A[salida][entrada])*ZC[entrada]
SZC.append(varszc) # que se convertira en el nuevo ZC, es decir Zj-Cj
zcgorro= "^Z" + str(k) + " - C" + str(k) + " = (Z" + str(k) + " - C" + str(k) + ") - ( y" + str(salida) + str(k) + " / y"+str(salida) + str(entrada)+" ) * (Z" + str(entrada) + " - C" + str(entrada) + ") = " + str(varszc)
if wo == False :
print zcgorro
else :
output.write (zcgorro + "\n")
##Calcular las XB#####
if wo == False :
print "\nXB's:"
else:
output.write("\nXB's:")
SXB=[]
for i in range(len(XB)) : #Procedimiento adecuado para las XB. Estas son almacenadas en un vector SXV
if i != salida : #que será el nuevo XB
varsxb=XB[i]-(XB[salida]/A[salida][entrada])*A[i][entrada]
SXB.append(varsxb)
xbgorro = "^XB" + str (i) +" = XB" +str(i)+ " - (XB"+str(salida)+" / y"+str(salida)+str(entrada)+" ) * y"+str(i)+str(entrada) +" = " + str(varsxb)
if wo == False :
print xbgorro
else :
output.write(xbgorro + "\n")
else :
varsxb=XB[salida]/A[salida][entrada]
SXB.append(varsxb)
xbgorro = "^XB" + str (i) +" = XB"+str(salida)+" / y"+str(salida)+str(entrada) +" = " + str(varsxb)
if wo == False :
print xbgorro
else :
output.write(xbgorro + "\n")
##Reasignar los resultados de las ecuaciones de transformacion ###########
return Yij, SZC, SXB #se regresan Yij, SZC y SXB para su reasignacion
##</Compute the transformation equations>############################################################
######################################################### </FUNCTIONS> #######################################################
##################################################### <MAIN> ################################################
print "\nResolución de PPL, por el metodo Simplex"
#Initialize variables######
rest=""
restricciones=[]
datos=False
wo=False
############################
##Reading the data from a file
for argumento in sys.argv :
if argumento == "-V" or argumento == "--about":
print """Resolución del Metodo Simplex Version 1.2 RC
Desarrollado por:
> Ramirez Vasquez Indira 131162
> Rivas Espinoza Arturo
> Reyes Conde Ivan 131621
Oct 2008
"""
sys.exit()
if argumento[:2] == "-S" :
f = open(argumento[3:],'r') ##Abre el archivo indicado por el argumento -S
fuente=f.readlines() ##Crea un arreglo con las lineas del archivo
f.close() ##Cierra el archivo
min_max=fuente[0][0] ##Asigna el minmax
eq=fuente[1][:-1] ##Asigna la funcion objetivo
for i in range(2,len(fuente)-1) :
restricciones.append(fuente[i][:-1]) ##Asigna las restricciones
datos=True ##Levanta una bandera para decir que ya se tienen todos los datos
##Reading the data from the command line
if datos == False :
##Pedir funcion a optimizar
min_max = raw_input("Es un problema de Maximización/minimización? (M/m) ")
while (min_max.upper() != "M") :
min_max = raw_input("Opción no valida, Maximización o minimizacion? (M/m)")
if min_max == "M":
print "\nProblema de Maximizacion"
elif min_max =="m":
print "\nProblema de minimizacion"
eq= raw_input("\nIntroduzca la ecuación Z a optimizar\nZ=")
##Pedir restricciones
print "\n\nIntroduzca las restricciones\n"
while rest != "." : #mientras no se le alimente con un '.' sigue agregando restricciones
rest=raw_input()
if rest !="." :
restricciones.append(rest)
for argumento in sys.argv :
if argumento[:2] == "-O" :
output=open(argumento[3:],'w')
wo=True
#####PERFORMING THE COMPUTATIONS
##Finding the required vectors
C=GetVectorC(eq) #Finding C; the h's and A's are added later
A, XB, C = MatrixA(restricciones,C,10000,min_max) #Building the first matrix A, with M=10,000. TODO: ask for M
ColBase = Base(A) #Finding and ordering the columns with ei's, i.e. where the canonical basis is
Cb = GetCB(C,ColBase) #from the positions of the ei's, find Cb
ZC = GetZjCj(A,C,Cb) #computes the first Zj-Cj
PTabla= "\n\nPrimera tabla:\n" + (MatrixPrint (A)) + "\nZj-Cj inicial: " + (VectorPrint(ZC)) + "\nXB inicial: " + (VectorPrint(XB))
if wo == False :
print PTabla
raw_input ("\nPresione Enter para continuar\n\n")
else :
output.write(min_max + " Z = " + eq + "\n")
output.write("s.a.\n")
for rest in restricciones :
output.write(rest + "\n")
output.write(PTabla)
#stopping criterion for the iteration loop
if min_max == "M" :
while [val for val in ZC if val < 0] : #sihay un valor en ZC que sea menor que CERO
entrada,salida = EntradaSalida(A,ZC,XB,"M") #realiza una nueva iteracion
A, ZC, XB=Ecuaciones_Trans(A,XB,ZC,entrada,salida)
if wo == False : raw_input("\nPresione Enter para continuar\n\n")
else :
while [val for val in ZC if val > 0] : #sihay un valor en ZC que sea MAYOR que CERO
entrada,salida = EntradaSalida(A,ZC,XB,"m") #realiza una nueva iteracion
A, ZC, XB=Ecuaciones_Trans(A,XB,ZC,entrada,salida)
if wo == False : raw_input("\nPresione Enter para continuar\n\n")
UTabla = "\n\nSe ha encontrado el Óptimo.\nUltima tabla:\n" + (MatrixPrint (A)) + "\nZj-Cj final: " + (VectorPrint(ZC)) + "\nXB final: " + (VectorPrint(XB))
if wo==False :
print UTabla
else :
output.write(UTabla)
output.close()
print "\nSe ha encontrado una solucion optima y se escribio en " + output.name + "Gracias por usar nuestro Software"
| Darkade/udlap | 6to/simplex.py | Python | apache-2.0 | 14,976 | 0.049195 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
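# --- Note (not part of the original file; inferred from the argument names) ---
# This one-liner asks pyaf's artificial-dataset benchmark helper to build a
# 128-point daily series with a linear trend, a cycle of length 12, no
# transformation, zero noise, 20 exogenous variables and AR order 0, and to run
# the standard test pipeline over it.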
| antoinecarme/pyaf | tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_128_None_LinearTrend_12__20.py | Python | bsd-3-clause | 262 | 0.087786 |
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
import socket
def get_ip():
"""Get primary IP (the one with a default route) of local machine.
This works on both Linux and Windows platforms, and doesn't require working
internet connection.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except:
return '127.0.0.1'
finally:
s.close()
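# --- Usage sketch (not part of the original file) ---
if __name__ == '__main__':
    # Prints the machine's primary IP, or 127.0.0.1 if no interface has a route.
    print(get_ip())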
| google/flight-lab | controller/common/net.py | Python | apache-2.0 | 1,062 | 0.00565 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_fuelcell_eng_dcl.py
@author Chris Goodrich
@brief Test code for the fuelcell_eng_dcl parser
Release notes:
initial release
"""
__author__ = 'cgoodrich'
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.fuelcell_eng.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParser
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParticleClassKey,\
FuelCellEngDclDataParticleRecovered,\
FuelCellEngDclDataParticleTelemetered
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.logging import log
@attr('UNIT', group='mi')
class FuelCellEngDclParserUnitTestCase(ParserUnitTestCase):
"""
fuelcell_eng_dcl Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._recovered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleRecovered
}
}
self._telemetered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleTelemetered
}
}
self._incomplete_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self._bad_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
}
def test_simple(self):
"""
Read file and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
This is the happy path.
"""
log.debug('===== START TEST SIMPLE =====')
num_particles_to_request = 25
num_expected_particles = 20
# Test the recovered version
log.debug('------ RECOVERED ------')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'recovered_20141207s.pwrsys.yml', RESOURCE_PATH)
# Test the telemetered version
log.debug('----- TELEMETERED -----')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._telemetered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'telemetered_20141207s.pwrsys.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE =====')
def test_bigfile(self):
"""
Read file and verify that all expected particles can be read.
Verify that the expected number of particles are produced.
Only one test is run as the content of the input files is the
same for recovered or telemetered.
"""
log.debug('===== START TEST BIGFILE =====')
num_particles_to_request = num_expected_particles = 870
with open(os.path.join(RESOURCE_PATH, '20141207.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BIGFILE =====')
def test_bad_checksum(self):
"""
Read file and verify that all expected particles can be read.
There are two lines with bad checksums in the file. The checksum
after the colon is incorrect on lines 10 and 23 of the input file.
Only one test is run as the content of the input files is the
same for recovered or telemetered.
"""
log.debug('===== START TEST BAD CHECKSUM =====')
num_particles_to_request = num_expected_particles = 18
with open(os.path.join(RESOURCE_PATH, '20141207s_bcs.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BAD CHECKSUM =====')
def test_badly_formed(self):
"""
Read file and verify that all expected particles can be read.
Line 1 Improperly formatted - No particle generated
Line 2 Improperly formatted - No particle generated
Line 9 - Bad checksum - No particle generated
No fuel cell data present on line 11 - No particle generated
No fuel cell data present on line 12 - No particle generated
No fuel cell data present on line 13 - No particle generated
No fuel cell data present on line 14 - No particle generated
No fuel cell data present on line 15 - No particle generated
Line 20 - Bad checksum - No particle generated
Line 24 Improperly formatted - No particle generated
Line 26 Improperly formatted - No particle generated
Line 27 Improperly formatted - No particle generated
Line 28 Bad/Missing Timestamp - No particle generated
Line 29 Bad/Missing Timestamp - No particle generated
Line 30 No data found - No particle generated
Line 31 No terminator found - No particle generated
Line 32 Improper format - No particle generated
Only one test is run as the content of the input files
is the same for recovered or telemetered.
"""
log.debug('===== START TEST BADLY FORMED =====')
num_particles_to_request = 33
num_expected_particles = 16
with open(os.path.join(RESOURCE_PATH, '20141207_badform.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BADLY FORMED =====')
def test_bad_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST BAD CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
with self.assertRaises(ConfigurationException):
parser = FuelCellEngDclParser(self._bad_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST BAD CONFIGURATION =====')
def test_partial_configuration(self):
"""
Attempt to build a parser with an incomplete configuration.
"""
log.debug('===== START TEST PARTIAL CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
with self.assertRaises(ConfigurationException):
parser = FuelCellEngDclParser(self._incomplete_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST PARTIAL CONFIGURATION =====')
def test_blank_line(self):
"""
Read file and verify that all expected particles can be read.
Verify that the contents of the particles are correct. There are
blank lines interspersed in the file. This test verifies that
these blank lines do not adversely affect the parser. Only one
test is run as the content of the input files is the same for
recovered or telemetered.
"""
log.debug('===== START TEST BLANK LINE =====')
num_particles_to_request = 25
num_expected_particles = 20
with open(os.path.join(RESOURCE_PATH, '20141207sbl.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BLANK LINE =====')
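# --- Running these tests (illustrative note, not part of the original file) ---
# The class is tagged for nose's attrib plugin, so one plausible invocation
# from the repository root would be something like:
#
#   nosetests -a UNIT mi/dataset/parser/test/test_fuelcell_eng_dcl.py
#
# (the exact command depends on the project's own test-runner setup).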
| petercable/mi-dataset | mi/dataset/parser/test/test_fuelcell_eng_dcl.py | Python | bsd-2-clause | 9,992 | 0.001601 |
"""
GitHub Authentication
"""
import httplib2
from django.conf import settings
from django.core.mail import send_mail
from oauth2client.client import OAuth2WebServerFlow
from helios_auth import utils
# some parameters to indicate that status updating is not possible
STATUS_UPDATES = False
# display tweaks
LOGIN_MESSAGE = "Log in with GitHub"
def get_flow(redirect_url=None):
return OAuth2WebServerFlow(
client_id=settings.GH_CLIENT_ID,
client_secret=settings.GH_CLIENT_SECRET,
scope='read:user user:email',
auth_uri="https://github.com/login/oauth/authorize",
token_uri="https://github.com/login/oauth/access_token",
redirect_uri=redirect_url,
)
def get_auth_url(request, redirect_url):
flow = get_flow(redirect_url)
request.session['gh_redirect_uri'] = redirect_url
return flow.step1_get_authorize_url()
def get_user_info_after_auth(request):
redirect_uri = request.session['gh_redirect_uri']
del request.session['gh_redirect_uri']
flow = get_flow(redirect_uri)
if 'code' not in request.GET:
return None
code = request.GET['code']
credentials = flow.step2_exchange(code)
http = httplib2.Http(".cache")
http = credentials.authorize(http)
(_, content) = http.request("https://api.github.com/user", "GET")
response = utils.from_json(content.decode('utf-8'))
user_id = response['login']
user_name = response['name']
(_, content) = http.request("https://api.github.com/user/emails", "GET")
response = utils.from_json(content.decode('utf-8'))
user_email = None
for email in response:
if email['verified'] and email['primary']:
user_email = email['email']
break
if not user_email:
raise Exception("email address with GitHub not verified")
return {
'type': 'github',
'user_id': user_id,
'name': '%s (%s)' % (user_id, user_name),
'info': {'email': user_email},
'token': {},
}
def do_logout(user):
return None
def update_status(token, message):
pass
def send_message(user_id, name, user_info, subject, body):
send_mail(
subject,
body,
settings.SERVER_EMAIL,
["%s <%s>" % (user_id, user_info['email'])],
fail_silently=False,
)
def check_constraint(eligibility, user_info):
pass
#
# Election Creation
#
def can_create_election(user_id, user_info):
return True
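# --- Flow sketch (illustrative, not part of the original file) ---
# How a hypothetical pair of Django views might drive this module; the view
# names and the callback URL below are assumptions for illustration only.
#
#   from django.http import HttpResponseRedirect
#
#   def github_login(request):
#       return HttpResponseRedirect(
#           get_auth_url(request, 'https://example.org/auth/github/after'))
#
#   def github_callback(request):
#       user = get_user_info_after_auth(request)  # None if 'code' is missing
#       # ... create a session for `user` here ...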
| benadida/helios-server | helios_auth/auth_systems/github.py | Python | apache-2.0 | 2,315 | 0.015983 |
""" KeystoneClient class encapsulates the work with the keystone service interface
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import requests
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Time import fromString, dateTime
__RCSID__ = '$Id$'
class KeystoneClient():
"""
"""
def __init__(self, url, parameters):
self.log = gLogger.getSubLogger("Keystone")
self.url = url
self.apiVersion = None
if "v3" in url:
self.apiVersion = 3
if "v2" in url:
self.apiVersion = 2
if self.apiVersion is None:
# Assume v2.0
self.apiVersion = 2
self.url = self.url.rstrip('/') + "/v2.0"
self.parameters = parameters
self.token = None
self.expires = None
self.project = self.parameters.get('Tenant', self.parameters.get('Project'))
self.projectID = self.parameters.get('ProjectID')
self.computeURL = None
self.imageURL = None
self.networkURL = None
self.caPath = self.parameters.get('CAPath', True)
self.valid = False
result = self.initialize()
if result['OK']:
self.valid = True
else:
gLogger.error("Keystone initialization failed: %s" % result['Message'])
def initialize(self):
""" Initialize the Keystone object obtaining the corresponding token
:return: S_OK/S_ERROR
"""
self.log.debug("Initializing for API version %d" % self.apiVersion)
result = self.getToken()
if not result['OK']:
return result
# If the tenant is not specified, try to get it and obtain the tenant specific token
if not self.project:
result = self.getTenants()
if not result['OK']:
return result
if result['Value']:
self.project, self.projectID = result['Value'][0]
result = self.getToken(force=True)
if not result['OK']:
return result
return S_OK()
def getToken(self, force=False):
"""Get the Keystone token
:param force: flag to force getting the token if even there is one in the cache
:return: S_OK(token) or S_ERROR
"""
if self.token is not None and not force:
if self.expires and (self.expires - dateTime()).seconds > 300:
return S_OK(self.token)
if self.apiVersion == 2:
result = self.__getToken2()
else:
result = self.__getToken3()
return result
def __getToken2(self):
"""Get the Keystone token for the version v2 of the keystone service
:return: S_OK(token) or S_ERROR
"""
user = self.parameters.get('User')
password = self.parameters.get('Password')
authArgs = {}
if user and password:
authDict = {'auth': {"passwordCredentials": {"username": user,
"password": password}
}
}
if self.project:
authDict['auth']['tenantName'] = self.project
elif self.parameters.get('Auth') == "voms":
authDict = {'auth': {'voms': True}}
if self.project:
authDict['auth']['tenantName'] = self.project
if self.parameters.get('Proxy'):
authArgs['cert'] = self.parameters.get('Proxy')
try:
result = requests.post("%s/tokens" % self.url,
headers={"Content-Type": "application/json"},
json=authDict,
verify=self.caPath,
**authArgs)
except Exception as exc:
return S_ERROR('Exception getting keystone token: %s' % str(exc))
output = result.json()
if result.status_code in [400, 401]:
message = "None"
if 'error' in output:
message = output['error'].get('message')
return S_ERROR('Authorization error: %s' % message)
self.token = str(output['access']['token']['id'])
expires = fromString(str(output['access']['token']['expires']).replace('T', ' ').replace('Z', ''))
issued = fromString(str(output['access']['token']['issued_at']).replace('T', ' ').replace('Z', ''))
self.expires = dateTime() + (expires - issued)
self.projectID = output['access']['token']['tenant']['id']
for endpoint in output['access']['serviceCatalog']:
if endpoint['type'] == 'compute':
self.computeURL = str(endpoint['endpoints'][0]['publicURL'])
elif endpoint['type'] == 'image':
self.imageURL = str(endpoint['endpoints'][0]['publicURL'])
elif endpoint['type'] == 'network':
self.networkURL = str(endpoint['endpoints'][0]['publicURL'])
return S_OK(self.token)
def __getToken3(self):
"""Get the Keystone token for the version v3 of the keystone service
:return: S_OK(token) or S_ERROR
"""
domain = self.parameters.get('Domain', "Default")
user = self.parameters.get('User')
password = self.parameters.get('Password')
appcred_file = self.parameters.get('Appcred')
authDict = {}
authArgs = {}
if user and password:
authDict = {'auth': {"identity": {"methods": ["password"],
"password": {"user": {"name": user,
"domain": {"name": domain},
"password": password
}
}
}
}
}
elif self.parameters.get('Auth') == "voms":
authDict = {"auth": {"identity": {"methods": ["mapped"],
"mapped": {'voms': True,
'identity_provider': 'egi.eu',
"protocol": 'mapped'}}}}
if self.parameters.get('Proxy'):
authArgs['cert'] = self.parameters.get('Proxy')
elif appcred_file:
# The application credentials are stored in a file of the format:
# id secret
ac_fd = open(appcred_file, 'r')
auth_info = ac_fd.read()
auth_info = auth_info.strip()
ac_id, ac_secret = auth_info.split(" ", 1)
ac_fd.close()
authDict = {'auth': {"identity": {"methods": ["application_credential"],
"application_credential": {"id": ac_id,
"secret": ac_secret}}}}
else:
return S_ERROR("No valid credentials provided")
# appcred includes the project scope binding in the credential itself
if self.project and not appcred_file:
authDict['auth']['scope'] = {"project": {"domain": {"name": domain},
"name": self.project
}
}
gLogger.debug('Request token with auth arguments: %s and body %s' %
(str(authArgs), str(authDict)))
url = "%s/auth/tokens" % self.url
try:
result = requests.post(url,
headers={"Content-Type": "application/json",
"Accept": "application/json",
},
json=authDict,
verify=self.caPath,
**authArgs)
except Exception as exc:
return S_ERROR('Exception getting keystone token: %s' % str(exc))
if result.status_code not in [200, 201, 202, 203, 204]:
return S_ERROR('Failed to get keystone token: %s' % result.text)
try:
self.token = result.headers['X-Subject-Token']
except Exception as exc:
return S_ERROR('Failed to get keystone token: %s' % str(exc))
output = result.json()
expires = fromString(str(output['token']['expires_at']).replace('T', ' ').replace('Z', ''))
issued = fromString(str(output['token']['issued_at']).replace('T', ' ').replace('Z', ''))
self.expires = dateTime() + (expires - issued)
if 'project' in output['token']:
if output['token']['project']['name'] == self.project:
self.projectID = output['token']['project']['id']
if 'catalog' in output['token']:
for service in output['token']['catalog']:
if service['type'] == 'compute':
for endpoint in service['endpoints']:
if endpoint['interface'] == 'public':
self.computeURL = str(endpoint['url'])
elif service['type'] == 'image':
for endpoint in service['endpoints']:
if endpoint['interface'] == 'public':
self.imageURL = str(endpoint['url'])
elif service['type'] == 'network':
for endpoint in service['endpoints']:
if endpoint['interface'] == 'public':
self.networkURL = str(endpoint['url'])
return S_OK(self.token)
def getTenants(self):
"""Get available tenants for the current token
:return: S_OK((tenant, tenant_id)) or S_ERROR
"""
if self.token is None:
return S_ERROR("No Keystone token yet available")
try:
result = requests.get("%s/tenants" % self.url,
headers={"Content-Type": "application/json",
"X-Auth-Token": self.token},
verify=self.caPath)
except Exception as exc:
return S_ERROR('Failed to get keystone token: %s' % str(exc))
if result.status_code != 200:
return S_ERROR("Error: %s" % result.text)
output = result.json()
tenants = []
if "tenants" in output:
for item in output["tenants"]:
tenants.append((item["name"], item['id']))
return S_OK(tenants)
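# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical way to drive the client above; the endpoint URL and
# credentials are placeholders, and the parameter keys match the ones read via
# self.parameters.get() in the code.
if __name__ == '__main__':
    params = {'User': 'myuser',
              'Password': 'mypassword',
              'Project': 'myproject',
              'Domain': 'Default',
              'CAPath': '/etc/ssl/certs/ca-certificates.crt'}
    keystone = KeystoneClient('https://keystone.example.org:5000/v3', params)
    if keystone.valid:
        print('Token: %s, compute endpoint: %s' % (keystone.token, keystone.computeURL))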
| DIRACGrid/VMDIRAC | VMDIRAC/Resources/Cloud/KeystoneClient.py | Python | gpl-3.0 | 9,771 | 0.007983 |
from pymemcache.client.hash import HashClient
from pymemcache.client.base import Client, PooledClient
from pymemcache.exceptions import MemcacheError, MemcacheUnknownError
from pymemcache import pool
from .test_client import ClientTestMixin, MockSocket
import unittest
import pytest
import mock
import socket
class TestHashClient(ClientTestMixin, unittest.TestCase):
def make_client_pool(self, hostname, mock_socket_values,
serializer=None, **kwargs):
mock_client = Client(hostname, serializer=serializer, **kwargs)
mock_client.sock = MockSocket(mock_socket_values)
client = PooledClient(hostname, serializer=serializer)
client.client_pool = pool.ObjectPool(lambda: mock_client)
return mock_client
def make_client(self, *mock_socket_values, **kwargs):
current_port = 11012
client = HashClient([], **kwargs)
ip = '127.0.0.1'
for vals in mock_socket_values:
s = '%s:%s' % (ip, current_port)
c = self.make_client_pool(
(ip, current_port),
vals,
**kwargs
)
client.clients[s] = c
client.hasher.add_node(s)
current_port += 1
return client
def test_setup_client_without_pooling(self):
with mock.patch('pymemcache.client.hash.Client') as internal_client:
client = HashClient([], timeout=999, key_prefix='foo_bar_baz')
client.add_server('127.0.0.1', '11211')
assert internal_client.call_args[0][0] == ('127.0.0.1', '11211')
kwargs = internal_client.call_args[1]
assert kwargs['timeout'] == 999
assert kwargs['key_prefix'] == 'foo_bar_baz'
def test_get_many_all_found(self):
client = self.make_client(*[
[b'STORED\r\n', b'VALUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
[b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
])
def get_clients(key):
if key == b'key3':
return client.clients['127.0.0.1:11012']
else:
return client.clients['127.0.0.1:11013']
client._get_client = get_clients
result = client.set(b'key1', b'value1', noreply=False)
result = client.set(b'key3', b'value2', noreply=False)
result = client.get_many([b'key1', b'key3'])
assert result == {b'key1': b'value1', b'key3': b'value2'}
def test_get_many_some_found(self):
client = self.make_client(*[
[b'END\r\n', ],
[b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
])
def get_clients(key):
if key == b'key3':
return client.clients['127.0.0.1:11012']
else:
return client.clients['127.0.0.1:11013']
client._get_client = get_clients
result = client.set(b'key1', b'value1', noreply=False)
result = client.get_many([b'key1', b'key3'])
assert result == {b'key1': b'value1'}
def test_get_many_bad_server_data(self):
client = self.make_client(*[
[b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
[b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
])
def get_clients(key):
if key == b'key3':
return client.clients['127.0.0.1:11012']
else:
return client.clients['127.0.0.1:11013']
client._get_client = get_clients
with pytest.raises(MemcacheUnknownError):
client.set(b'key1', b'value1', noreply=False)
client.set(b'key3', b'value2', noreply=False)
client.get_many([b'key1', b'key3'])
def test_get_many_bad_server_data_ignore(self):
client = self.make_client(*[
[b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
[b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
], ignore_exc=True)
def get_clients(key):
if key == b'key3':
return client.clients['127.0.0.1:11012']
else:
return client.clients['127.0.0.1:11013']
client._get_client = get_clients
client.set(b'key1', b'value1', noreply=False)
client.set(b'key3', b'value2', noreply=False)
result = client.get_many([b'key1', b'key3'])
assert result == {}
def test_gets_many(self):
client = self.make_client(*[
[b'STORED\r\n', b'VALUE key3 0 6 1\r\nvalue2\r\nEND\r\n', ],
[b'STORED\r\n', b'VALUE key1 0 6 1\r\nvalue1\r\nEND\r\n', ],
])
def get_clients(key):
if key == b'key3':
return client.clients['127.0.0.1:11012']
else:
return client.clients['127.0.0.1:11013']
client._get_client = get_clients
assert client.set(b'key1', b'value1', noreply=False) is True
assert client.set(b'key3', b'value2', noreply=False) is True
result = client.gets_many([b'key1', b'key3'])
assert (result ==
{b'key1': (b'value1', b'1'), b'key3': (b'value2', b'1')})
def test_no_servers_left(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[], use_pooling=True,
ignore_exc=True,
timeout=1, connect_timeout=1
)
hashed_client = client._get_client('foo')
assert hashed_client is None
def test_no_servers_left_raise_exception(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[], use_pooling=True,
ignore_exc=False,
timeout=1, connect_timeout=1
)
with pytest.raises(MemcacheError) as e:
client._get_client('foo')
assert str(e.value) == 'All servers seem to be down right now'
def test_unavailable_servers_zero_retry_raise_exception(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[('example.com', 11211)], use_pooling=True,
ignore_exc=False,
retry_attempts=0, timeout=1, connect_timeout=1
)
with pytest.raises(socket.error):
client.get('foo')
def test_no_servers_left_with_commands_return_default_value(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[], use_pooling=True,
ignore_exc=True,
timeout=1, connect_timeout=1
)
result = client.get('foo')
assert result is None
result = client.set('foo', 'bar')
assert result is False
def test_no_servers_left_with_set_many(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[], use_pooling=True,
ignore_exc=True,
timeout=1, connect_timeout=1
)
result = client.set_many({'foo': 'bar'})
assert result is False
def test_no_servers_left_with_get_many(self):
from pymemcache.client.hash import HashClient
client = HashClient(
[], use_pooling=True,
ignore_exc=True,
timeout=1, connect_timeout=1
)
result = client.get_many(['foo', 'bar'])
assert result == {'foo': False, 'bar': False}
# TODO: Test failover logic
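# --- Usage sketch (not part of the original test file) ---
# How HashClient is typically constructed against real memcached servers; the
# addresses below are placeholders.
if __name__ == '__main__':
    real_client = HashClient([('127.0.0.1', 11211), ('127.0.0.1', 11212)])
    real_client.set('some_key', 'some_value')
    print(real_client.get('some_key'))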
| bwalks/pymemcache | pymemcache/test/test_client_hash.py | Python | apache-2.0 | 7,403 | 0 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PersonTranslation.roman_first_name'
db.add_column('people_persontranslation', 'roman_first_name',
self.gf('django.db.models.fields.CharField')(default=' ', max_length=256),
keep_default=False)
# Adding field 'PersonTranslation.roman_last_name'
db.add_column('people_persontranslation', 'roman_last_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
keep_default=False)
# Adding field 'PersonTranslation.non_roman_first_name'
db.add_column('people_persontranslation', 'non_roman_first_name',
self.gf('django.db.models.fields.CharField')(default=' ', max_length=256),
keep_default=False)
# Adding field 'PersonTranslation.non_roman_last_name'
db.add_column('people_persontranslation', 'non_roman_last_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PersonTranslation.roman_first_name'
db.delete_column('people_persontranslation', 'roman_first_name')
# Deleting field 'PersonTranslation.roman_last_name'
db.delete_column('people_persontranslation', 'roman_last_name')
# Deleting field 'PersonTranslation.non_roman_first_name'
db.delete_column('people_persontranslation', 'non_roman_first_name')
# Deleting field 'PersonTranslation.non_roman_last_name'
db.delete_column('people_persontranslation', 'non_roman_last_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 9, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'people.link': {
'Meta': {'object_name': 'Link'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.LinkType']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'people.linktype': {
'Meta': {'ordering': "['ordering']", 'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256', 'blank': 'True'})
},
'people.linktypetranslation': {
'Meta': {'object_name': 'LinkTypeTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.LinkType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'people.person': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Person'},
'chosen_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Role']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'people.personpluginmodel': {
'Meta': {'object_name': 'PersonPluginModel', 'db_table': "'cmsplugin_personpluginmodel'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"})
},
'people.persontranslation': {
'Meta': {'object_name': 'PersonTranslation'},
'bio': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'non_roman_first_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'non_roman_last_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'roman_first_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'roman_last_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'short_bio': ('django.db.models.fields.TextField', [], {'max_length': '512', 'blank': 'True'})
},
'people.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'people.roletranslation': {
'Meta': {'object_name': 'RoleTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Role']"}),
'role_description': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'})
}
}
complete_apps = ['people']
|
bitmazk/django-people
|
people/south_migrations/0005_auto__add_field_persontranslation_roman_first_name__add_field_persontr.py
|
Python
|
mit
| 15,072 | 0.007564 |
import gtk
class ExtensionFeatures:
SYSTEM_WIDE = 0
class MountManagerExtension:
"""Base class for mount manager extensions.
Mount manager has only one instance and is created on program startup.
Methods defined in this class are called automatically by the mount manager
so you need to implement them.
"""
# features extension supports
features = ()
def __init__(self, parent, window):
self._parent = parent
self._window = window
self._application = self._parent._application
# create user interface
self._container = gtk.VBox(False, 5)
self._controls = gtk.HBox(False, 5)
separator = gtk.HSeparator()
# pack interface
self._container.pack_end(separator, False, False, 0)
self._container.pack_end(self._controls, False, False, 0)
def can_handle(self, uri):
"""Returns boolean denoting if specified URI can be handled by this extension"""
return False
def get_container(self):
"""Return container widget"""
return self._container
def get_information(self):
"""Returns information about extension"""
icon = None
name = None
return icon, name
def unmount(self, uri):
"""Method called by the mount manager for unmounting the selected URI"""
pass
def focus_object(self):
"""Method called by the mount manager for focusing main object"""
pass
@classmethod
def get_features(cls):
"""Returns set of features supported by extension"""
return cls.features
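A minimal sketch of how a concrete extension might subclass this base class. It relies only on what is defined above (the gtk import, the _controls box and the overridable methods); the ExampleExtension name, the smb:// check and the icon name are assumptions made for illustration, not part of Sunflower's actual plugins.
class ExampleExtension(MountManagerExtension):
	"""Illustrative extension that claims smb:// locations."""
	def __init__(self, parent, window):
		MountManagerExtension.__init__(self, parent, window)
		# add a single control to the button box provided by the base class
		self._unmount_button = gtk.Button('Unmount')
		self._controls.pack_start(self._unmount_button, False, False, 0)
	def can_handle(self, uri):
		"""Handle only smb:// URIs."""
		return uri.startswith('smb://')
	def get_information(self):
		"""Icon name and title shown by the mount manager."""
		return 'network-server', 'Example extension'
	def unmount(self, uri):
		"""Unmount logic for the selected URI would go here."""
		pass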
|
Azulinho/sunflower-file-manager-with-tmsu-tagging-support
|
application/plugin_base/mount_manager_extension.py
|
Python
|
gpl-3.0
| 1,433 | 0.032798 |
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import clone_request
import warnings
def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None):
"""
Given a model instance, and an optional pk and slug field,
return the full list of all other field names on that model.
For use when performing full_clean on a model instance,
so we only clean the required fields.
"""
include = []
if pk:
# Pending deprecation
pk_field = obj._meta.pk
while pk_field.rel:
pk_field = pk_field.rel.to._meta.pk
include.append(pk_field.name)
if slug_field:
# Pending deprecation
include.append(slug_field)
if lookup_field and lookup_field != 'pk':
include.append(lookup_field)
return [field.name for field in obj._meta.fields if field.name not in include]
class CreateModelMixin(object):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_success_headers(self, data):
try:
return {'Location': data['url']}
except (TypeError, KeyError):
return {}
class ListModelMixin(object):
"""
List a queryset.
"""
empty_error = "Empty list and '%(class_name)s.allow_empty' is False."
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
# Default is to allow empty querysets. This can be altered by setting
# `.allow_empty = False`, to raise 404 errors on empty querysets.
if not self.allow_empty and not self.object_list:
warnings.warn(
'The `allow_empty` parameter is due to be deprecated. '
'To use `allow_empty=False` style behavior, You should override '
'`get_queryset()` and explicitly raise a 404 on empty querysets.',
PendingDeprecationWarning
)
class_name = self.__class__.__name__
error_msg = self.empty_error % {'class_name': class_name}
raise Http404(error_msg)
# Switch between paginated or standard style responses
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_pagination_serializer(page)
else:
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
class RetrieveModelMixin(object):
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = self.get_serializer(self.object)
return Response(serializer.data)
class UpdateModelMixin(object):
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
self.object = self.get_object_or_none()
if self.object is None:
created = True
save_kwargs = {'force_insert': True}
success_status_code = status.HTTP_201_CREATED
else:
created = False
save_kwargs = {'force_update': True}
success_status_code = status.HTTP_200_OK
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(**save_kwargs)
self.post_save(self.object, created=created)
return Response(serializer.data, status=success_status_code)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def get_object_or_none(self):
try:
return self.get_object()
except Http404:
if self.request.method == 'PUT':
# For PUT-as-create operation, we need to ensure that we have
# relevant permissions, as if this was a POST request. This
# will either raise a PermissionDenied exception, or simply
# return None.
self.check_permissions(clone_request(self.request, 'POST'))
else:
# PATCH requests where the object does not exist should still
# return a 404 response.
raise
def pre_save(self, obj):
"""
Set any attributes on the object that are implicit in the request.
"""
# pk and/or slug attributes are implicit in the URL.
lookup = self.kwargs.get(self.lookup_field, None)
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
slug_field = slug and self.slug_field or None
if lookup:
setattr(obj, self.lookup_field, lookup)
if pk:
setattr(obj, 'pk', pk)
if slug:
setattr(obj, slug_field, slug)
# Ensure we clean the attributes so that we don't eg return integer
# pk using a string representation, as provided by the url conf kwarg.
if hasattr(obj, 'full_clean'):
exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field)
obj.full_clean(exclude)
class DestroyModelMixin(object):
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
DeltaEpsilon-HackFMI2/FMICalendar-REST
|
venv/lib/python2.7/site-packages/rest_framework/mixins.py
|
Python
|
mit
| 6,556 | 0.000915 |
import cv2
import numpy as np
import os
from vilay.core.Descriptor import MediaTime, Shape
from vilay.detectors.IDetector import IDetector
from vilay.core.DescriptionScheme import DescriptionScheme
class FaceDetector(IDetector):
def getName(self):
return "Face Detector"
def initialize(self):
# define haar-detector file
print os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml'
self.cascade = cv2.CascadeClassifier(os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml')
def detect(self, mediaTimes, tgtDS, film, rootDS, mainGUI):
for mediaTime in mediaTimes:
for frameIdx in range(mediaTime.startTime, mediaTime.startTime + mediaTime.duration):
actFrame = film.getFrame(frameIdx)
# preprocessing
actFrame = cv2.cvtColor(actFrame, cv2.cv.CV_BGR2GRAY)
actFrame = cv2.equalizeHist(actFrame)
# detect faces
faces = self.cascade.detectMultiScale(actFrame, 1.2, 3, 0, (5,5))
# create ds and add time and shape descriptor
for faceIdx in range(len(faces)):
[x,y,width,height] = faces[faceIdx,:]
ds = DescriptionScheme('RTI', 'Face Detector')
region = Shape('Face Detector','rect', np.array([[x, y], [x + width, y + height]]))
                    frameTime = MediaTime('Face Detector', frameIdx, 1)
                    tgtDS.addDescriptionScheme(ds)
                    ds.addDescriptor(region)
                    ds.addDescriptor(frameTime)
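The detector above is tied to ViLay's DescriptionScheme/Shape/MediaTime classes, but the underlying OpenCV calls can be tried on their own. A standalone sketch using the newer cv2.COLOR_BGR2GRAY constant (the plugin uses the legacy cv2.cv.CV_BGR2GRAY); the cascade and image paths are placeholder assumptions:
import cv2
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')  # placeholder path
frame = cv2.imread('group_photo.jpg')                                    # placeholder image
# same preprocessing as the plugin: grayscale plus histogram equalisation
gray = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
# scaleFactor=1.2 and minNeighbors=3 mirror the plugin's detectMultiScale arguments
faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(5, 5))
for (x, y, w, h) in faces:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite('group_photo_faces.jpg', frame)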
|
dakot/vilay-detect
|
vilay/detectors/FaceDetector.py
|
Python
|
gpl-3.0
| 1,782 | 0.014029 |
import unittest
import warnings
warnings.filterwarnings('ignore', module=r'.*fuz.*', message='.*Sequence.*')
import sys
import os.path
sys.path.insert(1, os.path.abspath('..'))
from sickbeard import common
from sickbeard.common import Quality, WantedQualities
from sickbeard.name_parser.parser import NameParser
from six import iteritems
quality_tests = {
common.Quality.SDTV: [
'Test.Show.S01E02.PDTV.XViD-GROUP',
'Test.Show.S01E02.PDTV.x264-GROUP',
'Test.Show.S01E02.HDTV.XViD-GROUP',
'Test.Show.S01E02.HDTV.x264-GROUP',
'Test.Show.S01E02.DSR.XViD-GROUP',
'Test.Show.S01E02.DSR.x264-GROUP',
'Test.Show.S01E02.TVRip.XViD-GROUP',
'Test.Show.S01E02.TVRip.x264-GROUP',
'Test.Show.S01E02.WEBRip.XViD-GROUP',
'Test.Show.S01E02.WEBRip.x264-GROUP',
'Test.Show.S01E02.Web-Rip.x264.GROUP',
'Test.Show.S01E02.WEB-DL.x264-GROUP',
'Test.Show.S01E02.WEB-DL.AAC2.0.H.264-GROUP',
'Test.Show.S01E02 WEB-DL H 264-GROUP',
'Test.Show.S01E02_WEB-DL_H_264-GROUP',
'Test.Show.S01E02.WEB-DL.AAC2.0.H264-GROUP',
'Test.Show.S01E02.HDTV.AAC.2.0.x264-GROUP',
'Test.Show.S01E02.HDTV.DD5.1.XViD-GROUP',
'Test.Show.S01E02.HDTV.DD7.1.h.264-GROUP',
'Test.Show.S01E02.WEB-DL.DD5.1.h.264-GROUP',
'Test.Show.S01E02.WEB.h264-GROUP',
'Test.Show.S01E02.WEB.x264-GROUP',
'Test.Show.S01E02.WEB.h265-GROUP',
'Test.Show.S01E02.WEB.x265-GROUP',
'Test.Show.S01E02.WEB.VP9-GROUP',
'Test.Show.S01E02.WEB.AV1-GROUP',
'Test.Show.S01E02.WEBRip.h264-GROUP',
'Test.Show.S01E02.WEBRip.x264-GROUP'],
common.Quality.SDDVD: [
'Test.Show.S01E02.DVDRiP.XViD-GROUP',
'Test.Show.S01E02.DVDRiP.DiVX-GROUP',
'Test.Show.S01E02.DVDRiP.x264-GROUP',
'Test.Show.S01E02.DVDRip.WS.XViD-GROUP',
'Test.Show.S01E02.DVDRip.WS.DiVX-GROUP',
'Test.Show.S01E02.DVDRip.WS.x264-GROUP',
'Test.Show-S01E02-Test.Dvd Rip',
'Test.Show.S01E02.BDRIP.XViD-GROUP',
'Test.Show.S01E02.BDRIP.DiVX-GROUP',
'Test.Show.S01E02.BDRIP.x264-GROUP',
'Test.Show.S01E02.BDRIP.VP9-GROUP',
'Test.Show.S01E02.BDRIP.WS.XViD-GROUP',
'Test.Show.S01E02.BDRIP.WS.DiVX-GROUP',
'Test.Show.S01E02.BDRIP.WS.x264-GROUP'],
common.Quality.HDTV: [
'Test.Show.S01E02.720p.HDTV.x264-GROUP',
'Test.Show.S01E02.720p.HDTV.VP9-GROUP',
'Test.Show.S01E02.HR.WS.PDTV.x264-GROUP',
'Test.Show.S01E02.720p.AHDTV.x264-GROUP'],
common.Quality.RAWHDTV: [
'Test.Show.S01E02.720p.HDTV.DD5.1.MPEG2-GROUP',
'Test.Show.S01E02.1080i.HDTV.DD2.0.MPEG2-GROUP',
'Test.Show.S01E02.1080i.HDTV.H.264.DD2.0-GROUP',
'Test Show - S01E02 - 1080i HDTV MPA1.0 H.264 - GROUP',
'Test.Show.S01E02.1080i.HDTV.DD.5.1.h264-GROUP'],
common.Quality.FULLHDTV: [
'Test.Show.S01E02.1080p.HDTV.x264-GROUP',
'Test.Show.S01E02.1080p.HDTV.vp9-GROUP',
'Test.Show.S01E02.1080p.AHDTV.x264-GROUP'],
common.Quality.HDWEBDL: [
'Test.Show.S01E02.720p.WEB-DL-GROUP',
'Test.Show.S01E02.720p.WEBRip-GROUP',
'Test.Show.S01E02.WEBRip.720p.H.264.AAC.2.0-GROUP',
'Test.Show.S01E02.720p.WEB-DL.AAC2.0.H.264-GROUP',
'Test Show S01E02 720p WEB-DL AAC2 0 H 264-GROUP',
'Test_Show.S01E02_720p_WEB-DL_AAC2.0_H264-GROUP',
'Test.Show.S01E02.720p.WEB-DL.AAC2.0.H264-GROUP',
'Test.Show.S01E02.720p.iTunes.Rip.H264.AAC-GROUP',
'Test.Show.s01e02.WEBDL.720p.GROUP',
'Test Show s01e02 WEBDL 720p GROUP',
'Test Show S01E02 720p WEB-DL AVC-GROUP',
'Test.Show.S01E02.WEB-RIP.720p.GROUP',
'Test.Show.S01E02.720p.WEB.h264-GROUP',
'Test.Show.S01E02.720p.WEB.x264-GROUP',
'Test.Show.S01E02.720p.WEB.h265-GROUP',
'Test.Show.S01E02.720p.WEB.x265-GROUP',
'Test.Show.S01E02.720p.WEB.vp9-GROUP',
'Test.Show.S01E02.720p.WEBRip.h264-GROUP',
'Test.Show.S01E02.720p.WEBRip.x264-GROUP'],
common.Quality.FULLHDWEBDL: [
'Test.Show.S01E02.1080p.WEB-DL-GROUP',
'Test.Show.S01E02.1080p.WEBRip-GROUP',
'Test.Show.S01E02.WEBRip.1080p.H.264.AAC.2.0-GROUP',
'Test.Show.S01E02.WEBRip.1080p.H264.AAC.2.0-GROUP',
'Test.Show.S01E02.1080p.iTunes.H.264.AAC-GROUP',
'Test Show S01E02 1080p iTunes H 264 AAC-GROUP',
'Test_Show_S01E02_1080p_iTunes_H_264_AAC-GROUP',
'Test.Show.s01e02.WEBDL.1080p.GROUP',
'Test Show s01e02 WEBDL 1080p GROUP',
'Test Show S01E02 1080p WEB-DL AVC-GROUP',
'Test.Show.S01E02.WEB-RIP.1080p.GROUP',
'Test.Show.S01E02.1080p.WEB.h264-GROUP',
'Test.Show.S01E02.1080p.WEB.x264-GROUP',
'Test.Show.S01E02.1080p.WEB.h265-GROUP',
'Test.Show.S01E02.1080p.WEB.x265-GROUP',
'Test.Show.S01E02.1080p.WEB.VP9-GROUP',
'Test.Show.S01E02.1080p.WEBRip.h264-GROUP',
'Test.Show.S01E02.1080p.WEBRip.x264-GROUP'],
common.Quality.HDBLURAY: [
'Test.Show.S01E02.720p.BluRay.x264-GROUP',
'Test.Show.S01E02.720p.BluRay.vp9-GROUP',
'Test.Show.S01E02.720p.HDDVD.x264-GROUP',
'Test.Show.S01E02.720p.Blu-ray.x264-GROUP'],
common.Quality.FULLHDBLURAY: [
'Test.Show.S01E02.1080p.BluRay.x264-GROUP',
'Test.Show.S01E02.1080p.HDDVD.x264-GROUP',
'Test.Show.S01E02.1080p.Blu-ray.x264-GROUP',
'Test.Show.S01E02.1080p.Blu-ray.vp9-GROUP',
'Test Show S02 1080p Remux AVC FLAC 5.1'],
common.Quality.UHD4KWEB: [
'Test.Show.S01E02.2160p.WEBRip.h264-GROUP',
'Test.Show.S01E02.2160p.WEBRip.x264-GROUP',
'Test.Show.S01E02.2160p.WEBRip.x265-GROUP',
'Test.Show.S01E02.2160p.WEBRip.vp9-GROUP'],
common.Quality.UNKNOWN: ['Test.Show.S01E02-SiCKGEAR']
}
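# Illustrative expectations drawn from the mapping above (added for clarity; not part of
# the original test data). nameQuality() is expected to return the dict key under which a
# release name is listed, e.g.:
#   common.Quality.nameQuality('Test.Show.S01E02.720p.HDTV.x264-GROUP')  -> Quality.HDTV
#   common.Quality.nameQuality('Test.Show.S01E02.1080p.WEB-DL-GROUP')    -> Quality.FULLHDWEBDL
#   common.Quality.nameQuality('Test.Show.S01E02-SiCKGEAR')              -> Quality.UNKNOWN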
class QualityTests(unittest.TestCase):
def check_quality_names(self, quality, cases):
for fn in cases:
second = common.Quality.nameQuality(fn)
self.assertEqual(quality, second, msg='fail [%s] != [%s] for case: %s' %
(Quality.qualityStrings[quality], Quality.qualityStrings[second], fn))
def check_proper_level(self, cases, is_anime=False):
np = NameParser(False, indexer_lookup=False, testing=True)
for case, level in cases:
p = np.parse(case)
second = common.Quality.get_proper_level(p.extra_info_no_name(), p.version, is_anime)
self.assertEqual(level, second, 'fail %s != %s for case: %s' % (level, second, case))
def check_wantedquality_list(self, cases):
for show_quality, result in cases:
sq = common.Quality.combineQualities(*show_quality)
wd = common.WantedQualities()
_ = wd.get_wantedlist(sq, False, common.Quality.NONE, common.UNAIRED, manual=True)
for w, v in iteritems(wd):
if w == sq:
for u, o in sorted(iteritems(v)):
self.assertEqual(o, result.get(u))
def check_wantedquality_get_wantedlist(self, cases):
for show_quality, result in cases:
sq = common.Quality.combineQualities(*show_quality)
wd = common.WantedQualities()
for case, wlist in result:
ka = {'qualities': sq}
ka.update(case)
res = wd.get_wantedlist(**ka)
self.assertEqual(res, wlist)
def check_sceneQuality(self, cases):
msg = 'Test case: "%s", actual: [%s] != expected: [%s]'
for show_name, result in cases:
sq = common.Quality.sceneQuality(show_name[0], show_name[1])
self.assertEqual(result, sq, msg=msg % (show_name[0], Quality.qualityStrings[sq],
Quality.qualityStrings[result]))
# TODO: repack / proper ? air-by-date ? season rip? multi-ep?
def test_SDTV(self):
self.assertEqual(common.Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV),
common.Quality.statusFromName('Test.Show.S01E02-GROUP.mkv'))
def test_qualites(self):
self.longMessage = True
for q, l in iteritems(quality_tests):
self.check_quality_names(q, l)
def test_reverse_parsing(self):
self.check_quality_names(common.Quality.SDTV, ['Test Show - S01E02 - SD TV - GROUP'])
self.check_quality_names(common.Quality.SDDVD, ['Test Show - S01E02 - SD DVD - GROUP'])
self.check_quality_names(common.Quality.HDTV, ['Test Show - S01E02 - HD TV - GROUP'])
self.check_quality_names(common.Quality.RAWHDTV, ['Test Show - S01E02 - RawHD TV - GROUP'])
self.check_quality_names(common.Quality.FULLHDTV, ['Test Show - S01E02 - 1080p HD TV - GROUP'])
self.check_quality_names(common.Quality.HDWEBDL, ['Test Show - S01E02 - 720p WEB-DL - GROUP'])
self.check_quality_names(common.Quality.FULLHDWEBDL, ['Test Show - S01E02 - 1080p WEB-DL - GROUP'])
self.check_quality_names(common.Quality.HDBLURAY, ['Test Show - S01E02 - 720p BluRay - GROUP'])
self.check_quality_names(common.Quality.FULLHDBLURAY, ['Test Show - S01E02 - 1080p BluRay - GROUP'])
self.check_quality_names(common.Quality.UNKNOWN, ['Test Show - S01E02 - Unknown - SiCKGEAR'])
def test_get_proper_level(self):
# release_name, expected level
self.check_proper_level([
('Test.Show.S01E13.PROPER.REPACK.720p.HDTV.x264-GROUP', 2),
('Test.Show.S01E13.720p.WEBRip.AAC2.0.x264-GROUP', 0),
('Test.Show.S01E13.PROPER.720p.HDTV.x264-GROUP', 1),
('Test.Show.S03E09-E10.REAL.PROPER.720p.HDTV.x264-GROUP', 2),
('Test.Show.S01E07.REAL.PROPER.1080p.WEB.x264-GROUP', 2),
('Test.Show.S13E20.REAL.REPACK.720p.HDTV.x264-GROUP', 2),
('Test.Show.S02E04.REAL.HDTV.x264-GROUP', 1),
('Test.Show.S01E10.Episode.Name.HDTV.x264-GROUP', 0),
('Test.Show.S12E10.1080p.WEB.x264-GROUP', 0),
('Test.Show.S03E01.Real.720p.WEB-DL.DD5.1.H.264-GROUP', 1),
('Test.Show.S04E06.REAL.PROPER.RERIP.720p.WEBRip.X264-GROUP', 2),
('Test.Show.S01E09.REPACK.REAL.PROPER.HDTV.XviD-GROUP.[SOMETHING].GROUP', 3),
('Test.Show.S01E13.REPACK.REAL.PROPER.720p.HDTV.x264-GROUP', 3),
('Test.Show.S01E06.The.Episode.Name.PROPER.480p.BluRay.x264-GROUP', 1),
('Test.Show.S01E19.PROPER.1080p.BluRay.x264-GROUP', 1),
('Test.Show.S01E03.REAL.PROPER.720p.BluRay.x264-GROUP', 2),
('Test.Show.S03E09.Episode.Name.720p.HDTV.x264-GROUP', 0),
('Test.Show.S02E07.PROPER.HDTV.x264-GROUP', 1),
('Test.Show.S02E12.REAL.REPACK.DSR.XviD-GROUP', 2),
('Test.Show Part2.REAL.AC3.WS DVDRip XviD-GROUP', 1),
('Test.Show.S01E02.Some.episode.title.REAL.READ.NFO.DVDRip.XviD-GROUP', 1)
])
def test_wantedQualities_List(self):
self.check_wantedquality_list([(
[(Quality.SDTV, Quality.HDTV),
(Quality.HDWEBDL, Quality.FULLHDBLURAY)],
{Quality.NONE: {
WantedQualities.wantedlist: [Quality.SDTV, Quality.HDTV],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDDVD: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.RAWHDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDWEBDL: {
WantedQualities.wantedlist: [Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: True},
Quality.FULLHDWEBDL: {
WantedQualities.wantedlist: [Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDBLURAY: {
WantedQualities.wantedlist: [Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDBLURAY: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: True},
Quality.UHD4KWEB: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.UNKNOWN: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False
}}),
([(Quality.SDTV, Quality.HDTV),
()],
{Quality.NONE: {
WantedQualities.wantedlist: [Quality.SDTV, Quality.HDTV],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDTV: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDDVD: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDTV: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.RAWHDTV: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDTV: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDWEBDL: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDWEBDL: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDBLURAY: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDBLURAY: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.UHD4KWEB: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.UNKNOWN: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False
}}),
([(Quality.SDTV, Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY),
(Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY)],
{Quality.NONE: {
WantedQualities.wantedlist: [
Quality.SDTV, Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.SDDVD: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.RAWHDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDTV: {
WantedQualities.wantedlist: [Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.HDWEBDL: {
WantedQualities.wantedlist: [Quality.FULLHDWEBDL, Quality.FULLHDBLURAY],
WantedQualities.bothlists: True, WantedQualities.upgradelist: True},
Quality.FULLHDWEBDL: {
WantedQualities.wantedlist: [Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: True},
Quality.HDBLURAY: {
WantedQualities.wantedlist: [Quality.FULLHDBLURAY],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.FULLHDBLURAY: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: True, WantedQualities.upgradelist: True},
Quality.UHD4KWEB: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False},
Quality.UNKNOWN: {
WantedQualities.wantedlist: [],
WantedQualities.bothlists: False, WantedQualities.upgradelist: False
}})
])
def test_wantedQualities_get_wantedlist(self):
self.check_wantedquality_get_wantedlist([(
[(Quality.SDDVD, Quality.RAWHDTV), (Quality.HDWEBDL, Quality.HDBLURAY)],
[({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDWEBDL (upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired, manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDWEBDL (upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# unaired, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# unaired, manual, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDWEBDL (upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# upgrade once, manual:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# FULLHDTV (between init and upgrade qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDWEBDL, Quality.HDBLURAY]),
# HDWEBDL (upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
             # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
]),
        # init-quality-only show (no upgrade qualities)
([(Quality.SDDVD, Quality.RAWHDTV), ()],
[({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired, manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDTV (between init qualities)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# RAWHDTV (max init quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# unaired, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# unaired, manual, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# upgrade once, manual:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDTV (between init qualities)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# RAWHDTV (max init quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDTV (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDWEBDL (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# HDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# FULLHDBLURAY (above init quality + unwanted)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [])
]),
# init, upgrade quality show (overlapping)
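      # note: the initial list (SDDVD, RAWHDTV, HDWEBDL) and the upgrade list
      # (RAWHDTV, HDBLURAY) share RAWHDTV; in the snatched/downloaded cases below,
      # anything held under RAWHDTV still wants both upgrade qualities, while
      # RAWHDTV up to HDWEBDL leaves only HDBLURAY wanted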
([(Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL), (Quality.RAWHDTV, Quality.HDBLURAY)],
[({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (max init quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
         # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (max init quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
         # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
         # HDWEBDL (max init quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
         # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
# upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDWEBDL (max init quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
            # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': False}, []),
# unaired, manual:
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDWEBDL (max init quality)
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': False, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
            # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': False, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# unaired, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDWEBDL (max init quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
            # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': False}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': False}, []),
# unaired, manual, upgrade once:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': True, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDWEBDL (max init quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
            # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': True, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': True, 'manual': True}, []),
# upgrade once, manual:
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.UNAIRED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.SKIPPED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.IGNORED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.WANTED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
({'upgradeonce': True, 'quality': Quality.NONE, 'status': common.FAILED,
'unaired': False, 'manual': True}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]),
# SDTV (below init quality)
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# SDDVD (init quality)
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# HDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.RAWHDTV, Quality.HDBLURAY]),
# RAWHDTV (init + upgrade quality)
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDTV (between init qualities + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# HDWEBDL (max init quality)
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# FULLHDWEBDL (unwanted quality between upgrade qualities)
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, [Quality.HDBLURAY]),
# HDBLURAY (max upgrade quality)
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
            # FULLHDBLURAY (higher than max upgrade quality + unwanted quality)
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED,
'unaired': False, 'manual': True}, []),
({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED,
'unaired': False, 'manual': True}, []),
]),
])
def test_sceneQuality(self):
self.longMessage = True
self.check_sceneQuality([
(('The.Show.S01E01.720p.HDTV.x264-Group', False), common.Quality.HDTV),
(('The.Show.S01E01.HDTV.x264-Group', False), common.Quality.SDTV),
(('The.Show.S01E01.x265-Group', False), common.Quality.SDTV),
(('The.Show.S02E04.DVDRip.XviD-Group', False), common.Quality.SDDVD),
(('The.Show.S02E04.DVDRip.x265-Group', False), common.Quality.SDDVD),
(('The.Show.S02E04.1080i.HDTV.MPA2.0.H.264-Group', False), common.Quality.RAWHDTV),
(('The.Show.S01E06.720p.BluRay.X264-Group ', False), common.Quality.HDBLURAY),
(('The.Show.S06E06.BluRay.1080p.DD5.1.H.265-Group', False), common.Quality.FULLHDBLURAY),
(('The.Show.S47E79.1080p.WEB.x264-Group', False), common.Quality.FULLHDWEBDL),
(('The.Show.S03E08.720p.WEB-DL.AAC5.1.H.264', False), common.Quality.HDWEBDL),
(('The Show S01E01 720p hevc-Group', False), common.Quality.HDTV),
(('The Show S01E01 720p x265-Group', False), common.Quality.HDTV),
(('The Show S01E01 720p HEVC x265-Group', False), common.Quality.HDTV),
(('The.Show.S01E01.720p.HEVC.x265-Group', False), common.Quality.HDTV),
(('The.Show.S01E01.720p.x265.HEVC-Group', False), common.Quality.HDTV),
(('The.Show.S01E01.1080p.HEVC.x265-Group', False), common.Quality.FULLHDTV),
(('The.Show.s03e11.720p.web.hevc.x265.Group', False), common.Quality.HDWEBDL),
(('The Show (15 Jan 2019) [text] 720HD mp4', False), common.Quality.HDTV),
(('The.Show.s03e11.ep.name.1080p.web.dl.hevc.x265.Group', False), common.Quality.FULLHDWEBDL),
(('The.Show.S03E05.1080p.NF.WEB-DL.DD5.1.HDR.HEVC-Group', False), common.Quality.FULLHDWEBDL),
(('The.Show.S01E10.Name.2160p.UHD.BluRay.REMUX.HDR.HEVC.DTS-HD.MA.5.1', False), common.Quality.UNKNOWN),
(('Show.S01E07.2160p.4K.UHD.10bit.NF.WEBRip.5.1.x265.HEVC-Group', False), common.Quality.UHD4KWEB),
])
for q, l in iteritems(quality_tests):
self.check_sceneQuality([((v, False), q) for v in l])
# TODO: fix these anime test cases at travis
def test_get_proper_level_anime(self):
# release_name, expected level
self.check_proper_level([
# ('Boruto - Naruto Next Generations - 59 [480p]', 0),
# ('[SGKK] Bleach - 312v2 (1280x720 h264 AAC) [F501C9BE]', 1),
# ('[SGKK] Bleach 312v1 [720p/MKV]', 0),
# ('[Cthuko] Shirobako - 05v2 [720p H264 AAC][80C9B09B]', 1),
# ('Naruto Shippuden - 314v3', 2)
], is_anime=True)
if '__main__' == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
SickGear/SickGear
|
tests/common_tests.py
|
Python
|
gpl-3.0
| 235,694 | 0.007259 |
def decode_args(s, delimiter="|", escapechar="\\"):
args = []
escaping = False
current_arg = ""
for c in s:
if escaping:
current_arg += c
escaping = False
elif c == escapechar:
escaping = True
elif c == delimiter:
args.append(current_arg)
current_arg = ""
else:
current_arg += c
args.append(current_arg)
return args
def encode_args(args, delimiter="|", escapechar="\\"):
encoded_args = ""
for idx, arg in enumerate(args):
if idx > 0:
encoded_args += delimiter
if not isinstance(arg, str):
arg = str(arg)
for c in arg:
if c == delimiter or c == escapechar:
encoded_args += escapechar
encoded_args += c
return encoded_args
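# Illustrative round-trip sketch (not part of the original module); the sample
# values below are made up purely for demonstration.
if __name__ == "__main__":
    sample = ["alpha", "value|with|pipes", "back\\slash", 42]
    encoded = encode_args(sample)
    # Delimiters and escape characters inside arguments are escaped on encode,
    # so decoding restores the original values (non-strings come back as str).
    assert decode_args(encoded) == ["alpha", "value|with|pipes", "back\\slash", "42"]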
|
zentralopensource/zentral
|
zentral/core/events/utils.py
|
Python
|
apache-2.0
| 851 | 0 |
#
# Python module to parse OMNeT++ vector files
#
# Currently only suitable for small vector files since
# everything is loaded into RAM
#
# Authors: Florian Kauer <florian.kauer@tuhh.de>
#
# Copyright (c) 2015, Institute of Telematics, Hamburg University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import scipy.interpolate
import numpy as np
vectors = []
class OmnetVector:
def __init__(self,file_input):
self.vectors = {}
self.dataTime = {}
self.dataValues = {}
self.maxtime = 0
self.attrs = {}
for line in file_input:
m = re.search("([0-9]+)\t([0-9]+)\t([0-9.e\-+]+)\t([0-9.e\-+na]+)",line)
#m = re.search("([0-9]+)",line)
if m:
vector = int(m.group(1))
if not vector in self.dataTime:
self.dataTime[vector] = []
self.dataValues[vector] = []
time = float(m.group(3))
self.dataTime[vector].append(time)
self.maxtime = max(self.maxtime,time)
self.dataValues[vector].append(float(m.group(4)))
else:
# vector 7 Net802154.host[0].ipApp[0] referenceChangeStat:vector ETV
m = re.search("vector *([0-9]*) *([^ ]*) *(.*):vector",line)
if m:
number = int(m.group(1))
module = m.group(2)
name = m.group(3)
if not name in self.vectors:
self.vectors[name] = {}
self.vectors[name][module] = number
else:
m = re.search("attr ([^ ]*) ([^ ]*)\n",line)
if m:
self.attrs[m.group(1)] = m.group(2)
def get_vector(self,name,module,resample=None):
num = self.vectors[name][module]
(time,values) = (self.dataTime[num],self.dataValues[num])
if resample != None:
newpoints = np.arange(0,self.maxtime,resample)
lastvalue = values[-1]
return (newpoints, scipy.interpolate.interp1d(time,values,'zero',assume_sorted=True,
bounds_error=False,fill_value=(0,lastvalue)).__call__(newpoints))
else:
return (time,values)
def get_attr(self,name):
return self.attrs[name]
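# Minimal usage sketch (not from the original project). The file name is
# hypothetical; the vector and module names are taken from the example line in
# the parser comment above and must match the contents of the actual .vec file.
if __name__ == "__main__":
    with open("results/example.vec") as f:
        vec = OmnetVector(f)
    # Resample the signal onto a regular 0.1 s grid for plotting or analysis.
    time, values = vec.get_vector("referenceChangeStat",
                                  "Net802154.host[0].ipApp[0]", resample=0.1)
    print("samples: %d" % len(values))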
|
i-tek/inet_ncs
|
simulations/analysis_tools/python/omnet_vector.py
|
Python
|
gpl-3.0
| 3,886 | 0.008492 |
# Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin, command, humanize_list
@admin("set")
def setcommand(m):
"""Adjust or view the settings on a command."""
#- !set setting value [#channel]
#-
#- ```irc
#- < GorillaWarfare> !set link auto
#- < GorillaBot> "link" set to "auto" in ##GorillaBot.
#- ```
#-
#- Change settings for a command. Allowed and default settings for a command are viewable in
#- the command's documentation. Settings can only be edited for channels the bot is joined
#- to, or has been joined to in the past.
if len(m.line) > 4:
m.bot.private_message(m.location,
'Too many arguments. Use "!set setting value [#channel]".')
return
# Check that this channel is in our config
if len(m.line) <= 3:
chan = m.location
elif len(m.line) == 4:
if m.line[3][0] != "#":
m.bot.private_message(m.location, 'Poorly-formatted command. '
'Use "!set setting value [#channel]".')
return
chan = m.line[3]
if not chan in m.bot.configuration["chans"]:
m.bot.private_message(m.location,
"Cannot access settings for {0}. Do I know about the channel?".format(
chan))
return
# Respond to command
settings = m.bot.configuration["chans"][chan]["settings"]
if len(m.line) == 1:
# Query all settings for channel
if not settings:
m.bot.private_message(m.location, "Nothing has been set for {0}.".format(chan))
else:
m.bot.private_message(m.location, (" ".join(
map(lambda s: ('"{0}" is set to "{1}".'.format(s[0], s[1])),
iter(settings.items())))))
elif len(m.line) == 2:
# Query value of a setting in a channel
if not settings or m.line[1] not in settings:
m.bot.private_message(m.location,
'"{0}" has not been set for {1}.'.format(m.line[1], chan))
else:
m.bot.private_message(m.location,
'"{0}" set to "{1}" in {2}.'.format(m.line[1],
settings[m.line[1]], chan))
else:
setting = m.line[1].lower()
value = m.line[2].lower()
m.bot.configuration["chans"][chan]["settings"][setting] = value
m.bot.update_configuration(m.bot.configuration)
m.bot.logger.info(
'"{0}" set to "{1}" in {2} by {3}.'.format(setting, value, chan, m.sender))
m.bot.private_message(m.location, '"{0}" set to "{1}" in {2}.'.format(setting, value, chan))
@admin()
def unset(m):
"""Unset a given setting."""
#- !unset setting [#channel]
#-
#- ```irc
#- < GorillaWarfare> !unset link
#- < GorillaBot> "link" unset for ##GorillaBot.
#- ```
#-
#- Removes the setting for a channel. This will revert to the default value. Settings can only
#- be edited for channels the bot is joined to, or has been joined to in the past.
if len(m.line) != 2 and not (len(m.line) == 3 and m.line[2][0] == "#"):
m.bot.private_message(m.location,
'Poorly-formatted command. Use "!unset setting [#channel]".')
return
chan = m.location if len(m.line) == 2 else m.line[2]
if chan not in m.bot.configuration["chans"]:
m.bot.private_message(m.location,
"Cannot unset setting for {0}. Do I know about the channel?".format(
chan))
return
try:
del m.bot.configuration["chans"][chan]["settings"][m.line[1]]
m.bot.update_configuration(m.bot.configuration)
except KeyError:
# Doesn't matter if the value wasn't set to begin with
pass
m.bot.private_message(m.location, '"{0}" unset for {1}.'.format(m.line[1], chan))
|
quanticle/GorillaBot
|
gorillabot/plugins/settings.py
|
Python
|
mit
| 5,087 | 0.009043 |
from PyQt4 import QtCore
import sys
from ilastik.core.projectClass import Project
from ilastik.core.testThread import TestThread
from ilastik.modules.unsupervised_decomposition.core.unsupervisedMgr import UnsupervisedDecompositionModuleMgr
from ilastik.modules.unsupervised_decomposition.core.algorithms.unsupervisedDecompositionPLSA import UnsupervisedDecompositionPLSA
from ilastik.modules.unsupervised_decomposition.core.algorithms.unsupervisedDecompositionPCA import UnsupervisedDecompositionPCA
import unittest
from ilastik.core import jobMachine
from ilastik import __path__ as ilastikpath
from ilastik.core.testThread import setUp, tearDown
# make sure that we have a recent numpy installation, the SVD used for PCA decomposition seems to have changed, resulting in a test failure!
import numpy
numpyversion = numpy.__version__.split('.')
numpyTooOldMessage = str("Your current numpy version is too old. Is: " + numpy.__version__ + " Should Be: 1.4.0 or newer. Skipping some tests.")
numpyRecentEnough = False
if((int(numpyversion[0]) >= 1) & (int(numpyversion[1]) >= 4) & (int(numpyversion[2]) >= 0)):
numpyRecentEnough = True
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n T e s t P r o j e c t *
#*******************************************************************************
class UnsupervisedDecompositionTestProject(object):
# this class is used to set up a default project which is then used for testing functionality,
    # hopefully, this will reduce code redundancy
def __init__(self, image_filename, unsupervisedMethod = None, numComponents = None):
self.image_filename = image_filename
        self.tolerance = 0.01 # maximum deviation per pixel
self.testdir = ilastikpath[0] + "/testdata/unsupervised_decomposition/"
# create project
self.project = Project('Project Name', 'Labeler', 'Description')
self.dataMgr = self.project.dataMgr
# create file list and load data
path = str(self.testdir + self.image_filename) # the image is not really used since we load the threshold overlay from a file, however, we need it to set the correct dimensions
fileList = []
fileList.append(path)
self.project.addFile(fileList)
# create automatic segmentation manager
self.unsupervisedMgr = UnsupervisedDecompositionModuleMgr(self.dataMgr)
# setup inputs
self.inputOverlays = []
self.inputOverlays.append(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Raw Data"])
# use default decomposer
if unsupervisedMethod is None:
self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod
else:
self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod = unsupervisedMethod
if numComponents is not None:
self.unsupervisedMethod.setNumberOfComponents(numComponents)
self.numIterations = numComponents
else:
self.numIterations = self.unsupervisedMethod.numComponents
# overlay lists and filenames
self.listOfResultOverlays = []
self.listOfFilenames = []
for i in range(self.numIterations):
self.listOfResultOverlays.append(str("Unsupervised/" + self.unsupervisedMethod.shortname + " component %d" % (i+1)))
filename = str(self.testdir + "gt_" + self.unsupervisedMethod.shortname + "_result_component_%d.h5" % (i+1))
print filename
self.listOfFilenames.append(filename)
#*******************************************************************************
# T e s t W h o l e M o d u l e D e f a u l t D e c o m p o s e r *
#*******************************************************************************
class TestWholeModuleDefaultDecomposer(unittest.TestCase): # use default decomposer
if not numpyRecentEnough:
__test__ = False
def setUp(self):
self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed
self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5")
def test_WholeModule(self):
t = QtCore.QTimer()
t.setSingleShot(True)
t.setInterval(0)
self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction)
t.start()
self.app.exec_()
def mainFunction(self):
self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance)
QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest)
self.testThread.start(self.testProject.inputOverlays)
self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys())
def finalizeTest(self):
# results comparison
self.assertEqual(self.testThread.passedTest, True)
self.app.quit()
#*******************************************************************************
# T e s t W h o l e M o d u l e P C A D e c o m p o s e r *
#*******************************************************************************
class TestWholeModulePCADecomposer(unittest.TestCase): # use PCA decomposer with 3 components
if not numpyRecentEnough:
__test__ = False
def setUp(self):
#print "setUp"
self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed
self.numComponents = 3
self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5", UnsupervisedDecompositionPCA, self.numComponents)
def test_WholeModule(self):
t = QtCore.QTimer()
t.setSingleShot(True)
t.setInterval(0)
self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction)
t.start()
self.app.exec_()
def mainFunction(self):
self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance)
QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest)
self.testThread.start(self.testProject.inputOverlays)
self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys())
def finalizeTest(self):
'''for i in range(self.testProject.unsupervisedMethod.numComponents):
print "*************************************"
print self.testProject.listOfResultOverlays[i]
obtained = self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr[self.testProject.listOfResultOverlays[i]]
from ilastik.core import dataImpex
dataImpex.DataImpex.exportOverlay(str("c:/gt_PCA_result_component_%d" % (i+1)), "h5", obtained)'''
# results comparison
self.assertEqual(self.testThread.passedTest, True)
# other conditions
# exactly self.numComponents computed overlays + self.numComponents ground truth overlays were added
self.numOverlaysAfter = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys())
self.assertEqual(self.numOverlaysAfter - self.numOverlaysBefore, self.numComponents*2)
self.app.quit()
#*******************************************************************************
# T e s t W h o l e M o d u l e P L S A D e c o m p o s e r *
#*******************************************************************************
class TestWholeModulePLSADecomposer(unittest.TestCase): # pLSA with 5 components
def setUp(self):
#print "setUp"
self.app = QtCore.QCoreApplication(sys.argv)
self.numComponents = 5
self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5", UnsupervisedDecompositionPLSA, self.numComponents)
def test_WholeModule(self):
t = QtCore.QTimer()
t.setSingleShot(True)
t.setInterval(0)
self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction)
t.start()
self.app.exec_()
def mainFunction(self):
# fix random seed
from ilastik.core.randomSeed import RandomSeed
RandomSeed.setRandomSeed(42)
self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance)
QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest)
self.testThread.start(self.testProject.inputOverlays)
self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys())
def finalizeTest(self):
'''for i in range(self.testProject.unsupervisedMethod.numComponents):
obtained = self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr[self.testProject.listOfResultOverlays[i]]
from ilastik.core import dataImpex
dataImpex.DataImpex.exportOverlay(str("c:/gt_pLSA_result_component_%d" % (i+1)), "h5", obtained)'''
# results comparison
self.assertEqual(self.testThread.passedTest, True)
# exactly self.numComponents computed overlays + self.numComponents ground truth overlays were added
self.numOverlaysAfter = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys())
self.assertEqual(self.numOverlaysAfter - self.numOverlaysBefore, self.numComponents*2)
self.app.quit()
#*******************************************************************************
# T e s t E t c *
#*******************************************************************************
class TestEtc(unittest.TestCase): # test additional functionality
def test_Etc(self):
# check that wrong numbers of components are reset to a valid value in {1, ..., numComponents}
numChannels = 10
decomposer = UnsupervisedDecompositionPCA()
components = decomposer.checkNumComponents(numChannels, 100)
assert((components <= numChannels) & (components >= 1))
components = decomposer.checkNumComponents(numChannels, 0)
print components
assert((components <= numChannels) & (components >= 1))
#*******************************************************************************
# i f _ _ n a m e _ _ = = " _ _ m a i n _ _ " *
#*******************************************************************************
if __name__ == "__main__":
unittest.main()
|
ilastik/ilastik-0.5
|
ilastik/modules/unsupervised_decomposition/core/testModule.py
|
Python
|
bsd-2-clause
| 11,269 | 0.011625 |
from django.utils.translation import ugettext_lazy as _ugl
default_app_config = 'django_sendgrid_parse.apps.DjangoSendgridParseAppConfig'
|
letops/django-sendgrid-parse
|
django_sendgrid_parse/__init__.py
|
Python
|
mit
| 139 | 0 |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Generate code that updates the source code line.
"""
def getCurrentLineNumberCode(context):
frame_handle = context.getFrameHandle()
if frame_handle is None:
return ""
else:
source_ref = context.getCurrentSourceCodeReference()
if source_ref.isInternal():
return ""
else:
return str(source_ref.getLineNumber())
def getLineNumberUpdateCode(context):
lineno_value = getCurrentLineNumberCode(context)
if lineno_value:
frame_handle = context.getFrameHandle()
return "%s->m_frame.f_lineno = %s;" % (frame_handle, lineno_value)
else:
return ""
def getErrorLineNumberUpdateCode(context):
(
_exception_type,
_exception_value,
_exception_tb,
exception_lineno,
) = context.variable_storage.getExceptionVariableDescriptions()
lineno_value = getCurrentLineNumberCode(context)
if lineno_value:
return "%s = %s;" % (exception_lineno, lineno_value)
else:
return ""
def emitErrorLineNumberUpdateCode(emit, context):
update_code = getErrorLineNumberUpdateCode(context)
if update_code:
emit(update_code)
def emitLineNumberUpdateCode(expression, emit, context):
context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference())
code = getLineNumberUpdateCode(context)
if code:
emit(code)
def getSetLineNumberCodeRaw(to_name, emit, context):
assert context.getFrameHandle() is not None
emit("%s->m_frame.f_lineno = %s;" % (context.getFrameHandle(), to_name))
def getLineNumberCode(to_name, emit, context):
assert context.getFrameHandle() is not None
emit("%s = %s->m_frame.f_lineno;" % (to_name, context.getFrameHandle()))
|
kayhayen/Nuitka
|
nuitka/codegen/LineNumberCodes.py
|
Python
|
apache-2.0
| 2,550 | 0.000392 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# Module to define chemical reaction functionality
###############################################################################
from math import exp, log
import sqlite3
from numpy import polyval
from scipy.optimize import fsolve
from PyQt4.QtGui import QApplication
from lib import unidades
from lib.sql import databank_name
class Reaction(object):
"""Chemical reaction object"""
status = 0
msg = QApplication.translate("pychemqt", "undefined")
error = 0
kwargs = {"comp": [],
"coef": [],
"tipo": 0,
"fase": 0,
"key": 0,
"base": 0,
"customHr": False,
"Hr": 0.0,
"formula": False,
"conversion": None,
"keq": None}
kwargsValue = ("Hr",)
kwargsList = ("tipo", "fase", "key", "base")
kwargsCheck = ("customHr", "formula")
calculateValue = ("DeltaP", "DeltaP_f", "DeltaP_ac", "DeltaP_h",
"DeltaP_v", "DeltaP_100ft", "V", "f", "Re", "Tout")
TEXT_TYPE = [QApplication.translate("pychemqt", "Estequiometric"),
QApplication.translate("pychemqt", "Equilibrium"),
QApplication.translate("pychemqt", "Kinetic"),
QApplication.translate("pychemqt", "Catalitic")]
TEXT_PHASE = [QApplication.translate("pychemqt", "Global"),
QApplication.translate("pychemqt", "Liquid"),
QApplication.translate("pychemqt", "Gas")]
TEXT_BASE = [QApplication.translate("pychemqt", "Mole"),
QApplication.translate("pychemqt", "Mass"),
QApplication.translate("pychemqt", "Partial pressure")]
def __init__(self, **kwargs):
"""constructor, kwargs keys can be:
comp: array with index of reaction components
            coef: array with the stoichiometric coefficient for each component
fase: Phase where reaction work
0 - Global
1 - Liquid
2 - Gas
key: Index of key component
base
0 - Mol
1 - Mass
2 - Partial pressure
            Hr: Heat of reaction, calculated from heats of formation if not given
            formula: boolean to show compound names in the reaction formula
            tipo: Kind of reaction
                0 - Stoichiometric, without equilibrium or kinetic calculations
1 - Equilibrium, without kinetic calculation
2 - Equilibrium by minimization of Gibbs free energy
3 - Kinetic
4 - Catalytic
conversion: conversion value for reaction with tipo=0
            keq: equilibrium constant for reaction with tipo=1
                -it is a float if it doesn't depend on temperature
                -it is an array if it depends on temperature
"""
self.kwargs = Reaction.kwargs.copy()
if kwargs:
self.__call__(**kwargs)
def __call__(self, **kwargs):
oldkwargs = self.kwargs.copy()
self.kwargs.update(kwargs)
if oldkwargs != self.kwargs and self.isCalculable:
self.calculo()
@property
def isCalculable(self):
self.msg = ""
self.status = 1
if not self.kwargs["comp"]:
self.msg = QApplication.translate("pychemqt", "undefined components")
self.status = 0
return
if not self.kwargs["coef"]:
self.msg = QApplication.translate("pychemqt", "undefined stequiometric")
self.status = 0
return
if self.kwargs["tipo"] == 0:
if self.kwargs["conversion"] is None:
self.msg = QApplication.translate("pychemqt", "undefined conversion")
self.status = 3
elif self.kwargs["tipo"] == 1:
if self.kwargs["keq"] is None:
self.msg = QApplication.translate("pychemqt", "undefined equilibrium constants")
self.status = 3
elif self.kwargs["tipo"] == 2:
pass
elif self.kwargs["tipo"] == 3:
pass
return True
def calculo(self):
self.componentes = self.kwargs["comp"]
self.coef = self.kwargs["coef"]
self.tipo = self.kwargs["tipo"]
self.base = self.kwargs["base"]
self.fase = self.kwargs["fase"]
self.calor = self.kwargs["Hr"]
self.formulas = self.kwargs["formula"]
self.keq = self.kwargs["keq"]
databank = sqlite3.connect(databank_name).cursor()
databank.execute("select nombre, peso_molecular, formula, \
calor_formacion_gas from compuestos where id IN \
%s" % str(tuple(self.componentes)))
nombre = []
peso_molecular = []
formula = []
calor_reaccion = 0
check_estequiometria = 0
for i, compuesto in enumerate(databank):
nombre.append(compuesto[0])
peso_molecular.append(compuesto[1])
formula.append(compuesto[2])
calor_reaccion += compuesto[3]*self.coef[i]
check_estequiometria += self.coef[i]*compuesto[1]
self.nombre = nombre
self.peso_molecular = peso_molecular
self.formula = formula
if self.calor:
self.Hr = self.kwargs.get("Hr", 0)
else:
self.Hr = unidades.MolarEnthalpy(calor_reaccion/abs(
self.coef[self.base]), "Jkmol")
self.error = round(check_estequiometria, 1)
self.state = self.error == 0
self.text = self._txt(self.formulas)
def conversion(self, corriente, T):
"""Calculate reaction conversion
corriente: Corriente instance for reaction
T: Temperature of reaction"""
if self.tipo == 0:
# Material balance without equilibrium or kinetics considerations
alfa = self.kwargs["conversion"]
elif self.tipo == 1:
# Chemical equilibrium without kinetics
if isinstance(self.keq, list):
A, B, C, D, E, F, G, H = self.keq
keq = exp(A+B/T+C*log(T)+D*T+E*T**2+F*T**3+G*T**4+H*T**5)
else:
keq = self.keq
def f(alfa):
conc_out = [
(corriente.caudalunitariomolar[i]+alfa*self.coef[i])
/ corriente.Q.m3h for i in range(len(self.componentes))]
productorio = 1
for i in range(len(self.componentes)):
productorio *= conc_out[i]**self.coef[i]
return keq-productorio
alfa = fsolve(f, 0.5)
print alfa, f(alfa)
avance = alfa*self.coef[self.base]*corriente.caudalunitariomolar[self.base]
Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] /
self.coef[self.base] for i in range(len(self.componentes))]
minimo = min(Q_out)
if minimo < 0:
            # The key component is not the limiting one; redo using the limiting component
indice = Q_out.index(minimo)
avance = self.coef[indice]*corriente.caudalunitariomolar[indice]
Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] /
self.coef[indice] for i in range(len(self.componentes))]
h = unidades.Power(self.Hr*self.coef[self.base] /
self.coef[indice]*avance, "Jh")
else:
h = unidades.Power(self.Hr*avance, "Jh")
print alfa, avance
caudal = sum(Q_out)
fraccion = [caudal_i/caudal for caudal_i in Q_out]
return fraccion, h
# def cinetica(self, tipo, Ko, Ei):
#        """Method that defines the reaction rate"""
#
#
def _txt(self, nombre=False):
"""Function to get text representation for reaction"""
if nombre:
txt = self.nombre
else:
txt = self.formula
reactivos = []
productos = []
for i in range(len(self.componentes)):
if self.coef[i] == int(self.coef[i]):
self.coef[i] = int(self.coef[i])
if self.coef[i] < -1:
reactivos.append(str(-self.coef[i])+txt[i])
elif self.coef[i] == -1:
reactivos.append(txt[i])
elif -1 < self.coef[i] < 0:
reactivos.append(str(-self.coef[i])+txt[i])
elif 0 < self.coef[i] < 1:
productos.append(str(self.coef[i])+txt[i])
elif self.coef[i] == 1:
productos.append(txt[i])
elif self.coef[i] > 1:
productos.append(str(self.coef[i])+txt[i])
return " + ".join(reactivos)+" ---> "+" + ".join(productos)
def __repr__(self):
if self.status:
eq = self._txt()
return eq + " " + "Hr= %0.4e Jkmol" % self.Hr
else:
return str(self.msg)
if __name__ == "__main__":
# from lib.corriente import Corriente, Mezcla
# mezcla=Corriente(300, 1, 1000, Mezcla([1, 46, 47, 62], [0.03, 0.01, 0.96, 0]))
# reaccion=Reaction([1, 46, 47, 62], [-2, 0, -1, 2], base=2)
# reaccion.conversion(mezcla)
# print reaccion
reaccion = Reaction(comp=[1, 47, 62], coef=[-2, -1, 2])
print reaccion
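# Illustrative note (not part of pychemqt): when keq is supplied as the
# 8-coefficient list [A, B, C, D, E, F, G, H], conversion() evaluates the
# temperature-dependent equilibrium constant as
#     keq(T) = exp(A + B/T + C*ln(T) + D*T + E*T**2 + F*T**3 + G*T**4 + H*T**5)
# A standalone sketch of that correlation (hypothetical helper, for
# illustration only) would be:
#     def keq_correlation(coef, T):
#         A, B, C, D, E, F, G, H = coef
#         return exp(A + B/T + C*log(T) + D*T + E*T**2
#                    + F*T**3 + G*T**4 + H*T**5)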
|
edusegzy/pychemqt
|
lib/reaction.py
|
Python
|
gpl-3.0
| 9,477 | 0.001478 |
import unicodedata
import re
class PathExtension:
"""
Enables readable url path names instead of ids for object traversal.
Names are stored as meta.pool_filename and generated from
title by default. Automatic generation can be disabled by setting
*meta.customfilename* to False for each object.
    Extensions like *.html* are not stored. Path matching works independently
    of extensions.
"""
maxlength = 55 # max path length
containerNamespace = True # unique filenames for container or global
extension = None
def Init(self):
if self.id == 0:
# skip roots
return
self.ListenEvent("commit", "TitleToFilename")
self._SetName()
def TitleToFilename(self, **kw):
"""
Uses title for filename
"""
customfilename = self.data.get("customfilename", None) # might not exist
if customfilename:
self._SetName()
return
# create url compatible filename from title
filename = self.EscapeFilename(self.meta.title)
# make unique filename
filename = self.UniqueFilename(filename)
if self.AddExtension(filename) == self.meta.pool_filename:
# no change
return
if filename:
# update
self.meta["pool_filename"] = self.AddExtension(filename)
else:
# reset filename
self.meta["pool_filename"] = ""
self._SetName()
self.Signal("pathupdate", path=self.meta["pool_filename"])
def UniqueFilename(self, name):
"""
        Makes the name unique within the container namespace (or globally)
"""
if name == "file":
name = "file_"
if self.containerNamespace:
unitref = self.parent.id
else:
unitref = None
cnt = 1
root = self.root
while root.search.FilenameToID(self.AddExtension(name), unitref, parameter=dict(id=self.id), operators=dict(id="!=")) != 0:
if cnt>1:
name = name.rstrip("1234567890-")
name = name+"-"+str(cnt)
cnt += 1
return name
def EscapeFilename(self, path):
"""
Converts name to valid path/url
Path length between *self.maxlength-20* and *self.maxlength* chars. Tries to cut longer names at spaces.
(based on django's slugify)
"""
path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore")
path = path.decode("utf-8")
        path = re.sub(r'[^\w\s-]', '', path).strip().lower()
        path = re.sub(r'[-\s]+', '_', path)
# avoid ids as filenames
try:
int(path)
path += "_n"
        except ValueError:
pass
# cut long filenames
cutlen = 20
if len(path) <= self.maxlength:
return path
# cut at '_'
pos = path[self.maxlength-cutlen:].find("_")
if pos > cutlen:
# no '_' found. cut at maxlength.
return path[:self.maxlength]
return path[:self.maxlength-cutlen+pos]
def AddExtension(self, filename):
if not self.extension:
return filename
return "%s.%s" % (filename, self.extension)
# system functions -----------------------------------------------------------------
def __getitem__(self, id):
"""
Traversal lookup based on object.pool_filename and object.id. Trailing extensions
are ignored if self.extension is None.
`file` is a reserved name and used in the current object to map file downloads.
"""
if id == "file":
raise KeyError(id)
if self.extension is None:
id = id.split(".")
if len(id)>2:
id = (".").join(id[:-1])
else:
id = id[0]
try:
id = int(id)
except ValueError:
name = id
id = 0
if name:
id = self.root.search.FilenameToID(name, self.id)
if not id:
raise KeyError(id)
obj = self.GetObj(id)
if obj is None:
raise KeyError(id)
return obj
def _SetName(self):
self.__name__ = self.meta["pool_filename"]
if not self.__name__:
self.__name__ = str(self.id)
class RootPathExtension(object):
"""
Extension for nive root objects to handle alternative url names
"""
extension = None
# system functions -----------------------------------------------------------------
def __getitem__(self, id):
"""
Traversal lookup based on object.pool_filename and object.id. Trailing extensions
are ignored.
`file` is a reserved name and used in the current object to map file downloads.
"""
if id == "file":
raise KeyError(id)
if self.extension is None:
id = id.split(".")
if len(id)>2:
id = (".").join(id[:-1])
else:
id = id[0]
try:
id = int(id)
        except ValueError:
name = id
id = 0
if name:
id = self.search.FilenameToID(name, self.id)
if not id:
raise KeyError(id)
obj = self.GetObj(id)
if not obj:
raise KeyError(id)
return obj
class PersistentRootPath(object):
"""
Extension for nive root objects to handle alternative url names
"""
def Init(self):
self.ListenEvent("commit", "UpdateRouting")
self.ListenEvent("dataloaded", "UpdateRouting")
self.UpdateRouting()
def UpdateRouting(self, **kw):
# check url name of root
if self.meta.get("pool_filename"):
name = self.meta.get("pool_filename")
if name != self.__name__:
# close cached root
self.app._CloseRootObj(name=self.__name__)
# update __name__ and hash
self.__name__ = str(name)
self.path = name
# unique root id generated from name . negative integer.
self.idhash = abs(hash(self.__name__))*-1
from nive.tool import Tool, ToolView
from nive.definitions import ToolConf, FieldConf, ViewConf, IApplication
tool_configuration = ToolConf(
id = "rewriteFilename",
context = "nive.extensions.path.RewriteFilenamesTool",
name = "Rewrite pool_filename based on title",
description = "Rewrites all or empty filenames based on form selection.",
apply = (IApplication,),
mimetype = "text/html",
data = [
FieldConf(id="types", datatype="checkbox", default="", settings=dict(codelist="types"), name="Object types", description=""),
FieldConf(id="testrun", datatype="bool", default=1, name="Testrun, no commits", description=""),
FieldConf(id="resetall", datatype="string", default="", size=15, name="Reset all filenames", description="<b>Urls will change! Enter 'reset all'</b>"),
FieldConf(id="tag", datatype="string", default="rewriteFilename", hidden=1)
],
views = [
ViewConf(name="", view=ToolView, attr="form", permission="admin", context="nive.extensions.path.RewriteFilenamesTool")
]
)
class RewriteFilenamesTool(Tool):
def _Run(self, **values):
parameter = dict()
if values.get("resetall")!="reset all":
parameter["pool_filename"] = ""
if values.get("types"):
tt = values.get("types")
if not isinstance(tt, list):
tt = [tt]
parameter["pool_type"] = tt
operators = dict(pool_type="IN", pool_filename="=")
fields = ("id", "title", "pool_type", "pool_filename")
root = self.app.root
recs = root.search.Search(parameter, fields, max=10000, operators=operators, sort="id", ascending=0)
if len(recs["items"]) == 0:
return "<h2>None found!</h2>", False
user = values["original"]["user"]
testrun = values["testrun"]
result = []
cnt = 0
for rec in recs["items"]:
obj = root.LookupObj(rec["id"])
if obj is None or not hasattr(obj, "TitleToFilename"):
continue
filename = obj.meta["pool_filename"]
obj.TitleToFilename()
if filename!=obj.meta["pool_filename"]:
result.append(filename+" <> "+obj.meta["pool_filename"])
if testrun==False:
obj.dbEntry.Commit(user=user)
#obj.CommitInternal(user=user)
cnt += 1
return "OK. %d filenames updated, %d different!<br>%s" % (cnt, len(result), "<br>".join(result)), True
|
nive/nive
|
nive/extensions/path.py
|
Python
|
gpl-3.0
| 8,830 | 0.008041 |
# Copyright (C) 2016 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin
class Dracut(Plugin, RedHatPlugin):
""" Dracut initramfs generator """
plugin_name = "dracut"
packages = ("dracut",)
def setup(self):
self.add_copy_spec([
"/etc/dracut.conf",
"/etc/dracut.conf.d"
])
self.add_cmd_output([
"dracut --list-modules",
"dracut --print-cmdline"
])
# vim: set et ts=4 sw=4 :
|
nijinashok/sos
|
sos/plugins/dracut.py
|
Python
|
gpl-2.0
| 862 | 0 |
# #######
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import GoogleCredentials
from .. import gcp
from .. import constants
class CloudResourcesBase(gcp.GoogleCloudApi):
def __init__(self,
config,
logger,
scope=constants.COMPUTE_SCOPE,
discovery=constants.CLOUDRESOURCES_DISCOVERY,
api_version=constants.API_V1):
super(CloudResourcesBase, self).__init__(
config,
logger,
scope,
discovery,
api_version)
def get_credentials(self, scope):
        # To obtain these credentials locally:
        # run: gcloud beta auth application-default login
        # then see ~/.config/gcloud/application_default_credentials.json
credentials = GoogleCredentials(
access_token=None,
client_id=self.auth['client_id'],
client_secret=self.auth['client_secret'],
refresh_token=self.auth['refresh_token'],
token_expiry=None,
token_uri=GOOGLE_TOKEN_URI,
user_agent='Python client library'
)
return credentials
def get(self):
raise NotImplementedError()
def create(self):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
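# Illustrative sketch (not part of the plugin): CloudResourcesBase is an
# abstract helper, so concrete resources reuse the credential handling above
# and implement get()/create()/delete() themselves. The outline below is
# hypothetical; the class name and comments are assumptions, not plugin code.
# class ExampleResource(CloudResourcesBase):
#     def get(self):
#         ...  # look the resource up through the discovery client
#     def create(self):
#         ...  # create it (typically returns a long-running operation)
#     def delete(self):
#         ...  # remove it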
|
cloudify-cosmo/cloudify-gcp-plugin
|
cloudify_gcp/admin/__init__.py
|
Python
|
apache-2.0
| 1,945 | 0 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/armor/shared_arm_reward_alderaan_elite.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","armor_reward_alderaan_elite")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/ship/components/armor/shared_arm_reward_alderaan_elite.py
|
Python
|
mit
| 494 | 0.044534 |
#!/usr/bin/env python
#
# This code is a part of `ardrone_autopilot` project
# which is distributed under the MIT license.
# See `LICENSE` file for details.
#
"""
This node is based on `base.py`; see that file for its documentation.
Inputs
------
* in/image -- main picture stream.
Outputs
-------
* out/image -- result image.
Parameters
----------
* ~show = False [bool] -- show the result instead of publishing it.
* ~encoding = "bgr8" [str] -- video encoding used by bridge.
"""
import rospy
import cv2
import tf
from tf.transformations import quaternion_matrix
import numpy as np
import image_geometry
import math
from base import BaseStreamHandler
class Show(BaseStreamHandler):
def __init__(self, *args, **kwargs):
self.tf = tf.TransformListener()
self.camera_model = image_geometry.PinholeCameraModel()
super(Show, self).__init__(*args, **kwargs)
def on_image(self, img):
if self.info is None:
return
self.camera_model.fromCameraInfo(self.info)
# self.camera_model.rectifyImage(img, img)
self.tf.waitForTransform('ardrone/odom',
'ardrone/ardrone_base_frontcam',
rospy.Time(0),
rospy.Duration(3))
trans, rot = self.tf.lookupTransform('ardrone/odom',
'ardrone/ardrone_base_frontcam',
rospy.Time(0))
rot_matrix = np.array(quaternion_matrix(rot))
for a in range(0, 360, 30):
vector = np.array(np.array([0.1 * math.cos(a * math.pi / 180), 0.1 * math.sin(a * math.pi / 180), 0, 0]))
point = vector.dot(rot_matrix)
x, y = self.camera_model.project3dToPixel(point)
cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
return img
if __name__ == "__main__":
Show.launch_node()
|
AmatanHead/ardrone-autopilot
|
nodes_opencv/frame.py
|
Python
|
mit
| 1,941 | 0.00103 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from functools import lru_cache
import inspect
import pickle
import pytest
import random
import textwrap
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
all_array_types = [
('bool', [True, False, False, True, True]),
('uint8', np.arange(5)),
('int8', np.arange(5)),
('uint16', np.arange(5)),
('int16', np.arange(5)),
('uint32', np.arange(5)),
('int32', np.arange(5)),
('uint64', np.arange(5, 10)),
('int64', np.arange(5, 10)),
('float', np.arange(0, 0.5, 0.1)),
('double', np.arange(0, 0.5, 0.1)),
('string', ['a', 'b', None, 'ddd', 'ee']),
('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
(pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
(pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
(pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
(pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
{'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]
exported_functions = [
func for (name, func) in sorted(pc.__dict__.items())
if hasattr(func, '__arrow_compute_function__')]
exported_option_classes = [
cls for (name, cls) in sorted(pc.__dict__.items())
if (isinstance(cls, type) and
cls is not pc.FunctionOptions and
issubclass(cls, pc.FunctionOptions))]
numerical_arrow_types = [
pa.int8(),
pa.int16(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint64(),
pa.float32(),
pa.float64()
]
def test_exported_functions():
# Check that all exported concrete functions can be called with
# the right number of arguments.
# Note that unregistered functions (e.g. with a mismatching name)
# will raise KeyError.
functions = exported_functions
assert len(functions) >= 10
for func in functions:
args = [object()] * func.__arrow_compute_function__['arity']
with pytest.raises(TypeError,
match="Got unexpected argument type "
"<class 'object'> for compute function"):
func(*args)
def test_exported_option_classes():
classes = exported_option_classes
assert len(classes) >= 10
for cls in classes:
# Option classes must have an introspectable constructor signature,
# and that signature should not have any *args or **kwargs.
sig = inspect.signature(cls)
for param in sig.parameters.values():
assert param.kind not in (param.VAR_POSITIONAL,
param.VAR_KEYWORD)
def test_list_functions():
assert len(pc.list_functions()) > 10
assert "add" in pc.list_functions()
def _check_get_function(name, expected_func_cls, expected_ker_cls,
min_num_kernels=1):
func = pc.get_function(name)
assert isinstance(func, expected_func_cls)
n = func.num_kernels
assert n >= min_num_kernels
assert n == len(func.kernels)
assert all(isinstance(ker, expected_ker_cls) for ker in func.kernels)
def test_get_function_scalar():
_check_get_function("add", pc.ScalarFunction, pc.ScalarKernel, 8)
def test_get_function_vector():
_check_get_function("unique", pc.VectorFunction, pc.VectorKernel, 8)
def test_get_function_aggregate():
_check_get_function("mean", pc.ScalarAggregateFunction,
pc.ScalarAggregateKernel, 8)
def test_call_function_with_memory_pool():
arr = pa.array(["foo", "bar", "baz"])
indices = np.array([2, 2, 1])
result1 = arr.take(indices)
result2 = pc.call_function('take', [arr, indices],
memory_pool=pa.default_memory_pool())
expected = pa.array(["baz", "baz", "bar"])
assert result1.equals(expected)
assert result2.equals(expected)
result3 = pc.take(arr, indices, memory_pool=pa.default_memory_pool())
assert result3.equals(expected)
def test_pickle_functions():
# Pickle registered functions
for name in pc.list_functions():
func = pc.get_function(name)
reconstructed = pickle.loads(pickle.dumps(func))
assert type(reconstructed) is type(func)
assert reconstructed.name == func.name
assert reconstructed.arity == func.arity
assert reconstructed.num_kernels == func.num_kernels
def test_pickle_global_functions():
# Pickle global wrappers (manual or automatic) of registered functions
for name in pc.list_functions():
func = getattr(pc, name)
reconstructed = pickle.loads(pickle.dumps(func))
assert reconstructed is func
def test_function_attributes():
# Sanity check attributes of registered functions
for name in pc.list_functions():
func = pc.get_function(name)
assert isinstance(func, pc.Function)
assert func.name == name
kernels = func.kernels
assert func.num_kernels == len(kernels)
assert all(isinstance(ker, pc.Kernel) for ker in kernels)
assert func.arity >= 1 # no varargs functions for now
repr(func)
for ker in kernels:
repr(ker)
def test_input_type_conversion():
# Automatic array conversion from Python
arr = pc.add([1, 2], [4, None])
assert arr.to_pylist() == [5, None]
# Automatic scalar conversion from Python
arr = pc.add([1, 2], 4)
assert arr.to_pylist() == [5, 6]
# Other scalar type
assert pc.equal(["foo", "bar", None],
"foo").to_pylist() == [True, False, None]
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_array(arrow_type):
arr = pa.array([1, 2, 3, 4], type=arrow_type)
assert arr.sum().as_py() == 10
assert pc.sum(arr).as_py() == 10
arr = pa.array([], type=arrow_type)
assert arr.sum().as_py() is None # noqa: E711
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_chunked_array(arrow_type):
arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type)
])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([3, 4], type=arrow_type)
])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array((), type=arrow_type)
assert arr.num_chunks == 0
assert pc.sum(arr).as_py() is None # noqa: E711
def test_mode_array():
# ARROW-9917
arr = pa.array([1, 1, 3, 4, 3, 5], type='int64')
mode = pc.mode(arr)
assert len(mode) == 1
assert mode[0].as_py() == {"mode": 1, "count": 2}
mode = pc.mode(arr, 2)
assert len(mode) == 2
assert mode[0].as_py() == {"mode": 1, "count": 2}
assert mode[1].as_py() == {"mode": 3, "count": 2}
arr = pa.array([], type='int64')
assert len(pc.mode(arr)) == 0
def test_mode_chunked_array():
# ARROW-9917
arr = pa.chunked_array([pa.array([1, 1, 3, 4, 3, 5], type='int64')])
mode = pc.mode(arr)
assert len(mode) == 1
assert mode[0].as_py() == {"mode": 1, "count": 2}
mode = pc.mode(arr, 2)
assert len(mode) == 2
assert mode[0].as_py() == {"mode": 1, "count": 2}
assert mode[1].as_py() == {"mode": 3, "count": 2}
arr = pa.chunked_array((), type='int64')
assert arr.num_chunks == 0
assert len(pc.mode(arr)) == 0
def test_variance():
data = [1, 2, 3, 4, 5, 6, 7, 8]
assert pc.variance(data).as_py() == 5.25
assert pc.variance(data, ddof=0).as_py() == 5.25
assert pc.variance(data, ddof=1).as_py() == 6.0
def test_match_substring():
arr = pa.array(["ab", "abc", "ba", None])
result = pc.match_substring(arr, "ab")
expected = pa.array([True, True, False, None])
assert expected.equals(result)
def test_split_pattern():
arr = pa.array(["-foo---bar--", "---foo---b"])
result = pc.split_pattern(arr, pattern="---")
expected = pa.array([["-foo", "bar--"], ["", "foo", "b"]])
assert expected.equals(result)
result = pc.split_pattern(arr, pattern="---", max_splits=1)
expected = pa.array([["-foo", "bar--"], ["", "foo---b"]])
assert expected.equals(result)
result = pc.split_pattern(arr, pattern="---", max_splits=1, reverse=True)
expected = pa.array([["-foo", "bar--"], ["---foo", "b"]])
assert expected.equals(result)
def test_split_whitespace_utf8():
arr = pa.array(["foo bar", " foo \u3000\tb"])
result = pc.utf8_split_whitespace(arr)
expected = pa.array([["foo", "bar"], ["", "foo", "b"]])
assert expected.equals(result)
result = pc.utf8_split_whitespace(arr, max_splits=1)
expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]])
assert expected.equals(result)
result = pc.utf8_split_whitespace(arr, max_splits=1, reverse=True)
expected = pa.array([["foo", "bar"], [" foo", "b"]])
assert expected.equals(result)
def test_split_whitespace_ascii():
arr = pa.array(["foo bar", " foo \u3000\tb"])
result = pc.ascii_split_whitespace(arr)
expected = pa.array([["foo", "bar"], ["", "foo", "\u3000", "b"]])
assert expected.equals(result)
result = pc.ascii_split_whitespace(arr, max_splits=1)
expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]])
assert expected.equals(result)
result = pc.ascii_split_whitespace(arr, max_splits=1, reverse=True)
expected = pa.array([["foo", "bar"], [" foo \u3000", "b"]])
assert expected.equals(result)
def test_min_max():
# An example generated function wrapper with possible options
data = [4, 5, 6, None, 1]
s = pc.min_max(data)
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions())
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions(null_handling='skip'))
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions(null_handling='emit_null'))
assert s.as_py() == {'min': None, 'max': None}
# Options as dict of kwargs
s = pc.min_max(data, options={'null_handling': 'emit_null'})
assert s.as_py() == {'min': None, 'max': None}
# Options as named functions arguments
s = pc.min_max(data, null_handling='emit_null')
assert s.as_py() == {'min': None, 'max': None}
# Both options and named arguments
with pytest.raises(TypeError):
s = pc.min_max(data, options=pc.MinMaxOptions(),
null_handling='emit_null')
# Wrong options type
options = pc.TakeOptions()
with pytest.raises(TypeError):
s = pc.min_max(data, options=options)
# Missing argument
with pytest.raises(
TypeError,
match=r"min_max\(\) missing 1 required positional argument"):
s = pc.min_max()
def test_is_valid():
# An example generated function wrapper without options
data = [4, 5, None]
assert pc.is_valid(data).to_pylist() == [True, True, False]
with pytest.raises(TypeError):
pc.is_valid(data, options=None)
def test_generated_docstrings():
assert pc.min_max.__doc__ == textwrap.dedent("""\
Compute the minimum and maximum values of a numeric array.
Null values are ignored by default.
This can be changed through MinMaxOptions.
Parameters
----------
array : Array-like
Argument to compute function
memory_pool : pyarrow.MemoryPool, optional
If not passed, will allocate memory from the default memory pool.
options : pyarrow.compute.MinMaxOptions, optional
Parameters altering compute function semantics
**kwargs: optional
Parameters for MinMaxOptions constructor. Either `options`
or `**kwargs` can be passed, but not both at the same time.
""")
assert pc.add.__doc__ == textwrap.dedent("""\
Add the arguments element-wise.
Results will wrap around on integer overflow.
Use function "add_checked" if you want overflow
to return an error.
Parameters
----------
x : Array-like or scalar-like
Argument to compute function
y : Array-like or scalar-like
Argument to compute function
memory_pool : pyarrow.MemoryPool, optional
If not passed, will allocate memory from the default memory pool.
""")
# We use isprintable to find codepoints that Python doesn't know about, but
# utf8proc does (or, in a future version of Python, the other way around).
# These codepoints cannot be compared between Arrow and the Python
# implementation.
@lru_cache()
def find_new_unicode_codepoints():
new = set()
characters = [chr(c) for c in range(0x80, 0x11000)
if not (0xD800 <= c < 0xE000)]
is_printable = pc.utf8_is_printable(pa.array(characters)).to_pylist()
for i, c in enumerate(characters):
if is_printable[i] != c.isprintable():
new.add(ord(c))
return new
# Python claims these are not alpha, not sure why; they are in
# gc='Other Letter': https://graphemica.com/%E1%B3%B2
unknown_issue_is_alpha = {0x1cf2, 0x1cf3}
# utf8proc does not know if codepoints are lower case
utf8proc_issue_is_lower = {
0xaa, 0xba, 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4,
0x2b5, 0x2b6, 0x2b7, 0x2b8, 0x2c0, 0x2c1, 0x2e0,
0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x37a, 0x1d2c, 0x1d2d,
0x1d2e, 0x1d2f, 0x1d30, 0x1d31, 0x1d32, 0x1d33,
0x1d34, 0x1d35, 0x1d36, 0x1d37, 0x1d38, 0x1d39,
0x1d3a, 0x1d3b, 0x1d3c, 0x1d3d, 0x1d3e, 0x1d3f,
0x1d40, 0x1d41, 0x1d42, 0x1d43, 0x1d44, 0x1d45,
0x1d46, 0x1d47, 0x1d48, 0x1d49, 0x1d4a, 0x1d4b,
0x1d4c, 0x1d4d, 0x1d4e, 0x1d4f, 0x1d50, 0x1d51,
0x1d52, 0x1d53, 0x1d54, 0x1d55, 0x1d56, 0x1d57,
0x1d58, 0x1d59, 0x1d5a, 0x1d5b, 0x1d5c, 0x1d5d,
0x1d5e, 0x1d5f, 0x1d60, 0x1d61, 0x1d62, 0x1d63,
0x1d64, 0x1d65, 0x1d66, 0x1d67, 0x1d68, 0x1d69,
0x1d6a, 0x1d78, 0x1d9b, 0x1d9c, 0x1d9d, 0x1d9e,
0x1d9f, 0x1da0, 0x1da1, 0x1da2, 0x1da3, 0x1da4,
0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa,
0x1dab, 0x1dac, 0x1dad, 0x1dae, 0x1daf, 0x1db0,
0x1db1, 0x1db2, 0x1db3, 0x1db4, 0x1db5, 0x1db6,
0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb, 0x1dbc,
0x1dbd, 0x1dbe, 0x1dbf, 0x2071, 0x207f, 0x2090,
0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096,
0x2097, 0x2098, 0x2099, 0x209a, 0x209b, 0x209c,
0x2c7c, 0x2c7d, 0xa69c, 0xa69d, 0xa770, 0xa7f8,
0xa7f9, 0xab5c, 0xab5d, 0xab5e, 0xab5f, }
# utf8proc does not store if a codepoint is numeric
numeric_info_missing = {
0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03,
0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96,
0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70,
0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341,
0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2,
0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a,
0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10,
0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e,
0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621,
0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973,
0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, 0x10fc5,
0x10fc6, 0x10fc7, 0x10fc8, 0x10fc9, 0x10fca,
0x10fcb, }
# utf8proc has no digit/numeric information
digit_info_missing = {
0xb2, 0xb3, 0xb9, 0x1369, 0x136a, 0x136b, 0x136c,
0x136d, 0x136e, 0x136f, 0x1370, 0x1371, 0x19da, 0x2070,
0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x2080,
0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
0x2088, 0x2089, 0x2460, 0x2461, 0x2462, 0x2463, 0x2464,
0x2465, 0x2466, 0x2467, 0x2468, 0x2474, 0x2475, 0x2476,
0x2477, 0x2478, 0x2479, 0x247a, 0x247b, 0x247c, 0x2488,
0x2489, 0x248a, 0x248b, 0x248c, 0x248d, 0x248e, 0x248f,
0x2490, 0x24ea, 0x24f5, 0x24f6, 0x24f7, 0x24f8, 0x24f9,
0x24fa, 0x24fb, 0x24fc, 0x24fd, 0x24ff, 0x2776, 0x2777,
0x2778, 0x2779, 0x277a, 0x277b, 0x277c, 0x277d, 0x277e,
0x2780, 0x2781, 0x2782, 0x2783, 0x2784, 0x2785, 0x2786,
0x2787, 0x2788, 0x278a, 0x278b, 0x278c, 0x278d, 0x278e,
0x278f, 0x2790, 0x2791, 0x2792, 0x10a40, 0x10a41,
0x10a42, 0x10a43, 0x10e60, 0x10e61, 0x10e62, 0x10e63,
0x10e64, 0x10e65, 0x10e66, 0x10e67, 0x10e68, }
numeric_info_missing = {
0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03,
0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96,
0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70,
0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341,
0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2,
0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a,
0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10,
0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e,
0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621,
0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973,
0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, }
codepoints_ignore = {
'is_alnum': numeric_info_missing | digit_info_missing |
unknown_issue_is_alpha,
'is_alpha': unknown_issue_is_alpha,
'is_digit': digit_info_missing,
'is_numeric': numeric_info_missing,
'is_lower': utf8proc_issue_is_lower
}
@pytest.mark.parametrize('function_name', ['is_alnum', 'is_alpha',
'is_ascii', 'is_decimal',
'is_digit', 'is_lower',
'is_numeric', 'is_printable',
'is_space', 'is_upper', ])
@pytest.mark.parametrize('variant', ['ascii', 'utf8'])
def test_string_py_compat_boolean(function_name, variant):
arrow_name = variant + "_" + function_name
py_name = function_name.replace('_', '')
ignore = codepoints_ignore.get(function_name, set()) |\
find_new_unicode_codepoints()
    for i in range(128 if variant == 'ascii' else 0x11000):
if i in range(0xD800, 0xE000):
continue # bug? pyarrow doesn't allow utf16 surrogates
# the issues we know of, we skip
if i in ignore:
continue
# Compare results with the equivalent Python predicate
# (except "is_space" where functions are known to be incompatible)
c = chr(i)
if hasattr(pc, arrow_name) and function_name != 'is_space':
ar = pa.array([c])
arrow_func = getattr(pc, arrow_name)
assert arrow_func(ar)[0].as_py() == getattr(c, py_name)()
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_take(ty, values):
arr = pa.array(values, type=ty)
for indices_type in [pa.int8(), pa.int64()]:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([values[0], values[4], values[2], None], type=ty)
assert result.equals(expected)
# empty indices
indices = pa.array([], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([], type=ty)
assert result.equals(expected)
indices = pa.array([2, 5])
with pytest.raises(IndexError):
arr.take(indices)
indices = pa.array([2, -1])
with pytest.raises(IndexError):
arr.take(indices)
def test_take_indices_types():
arr = pa.array(range(5))
for indices_type in ['uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([0, 4, 2, None])
assert result.equals(expected)
for indices_type in [pa.float32(), pa.float64()]:
indices = pa.array([0, 4, 2], type=indices_type)
with pytest.raises(NotImplementedError):
arr.take(indices)
def test_take_on_chunked_array():
# ARROW-9504
arr = pa.chunked_array([
[
"a",
"b",
"c",
"d",
"e"
],
[
"f",
"g",
"h",
"i",
"j"
]
])
indices = np.array([0, 5, 1, 6, 9, 2])
result = arr.take(indices)
expected = pa.chunked_array([["a", "f", "b", "g", "j", "c"]])
assert result.equals(expected)
indices = pa.chunked_array([[1], [9, 2]])
result = arr.take(indices)
expected = pa.chunked_array([
[
"b"
],
[
"j",
"c"
]
])
assert result.equals(expected)
@pytest.mark.parametrize('ordered', [False, True])
def test_take_dictionary(ordered):
arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
ordered=ordered)
result = arr.take(pa.array([0, 1, 3]))
result.validate()
assert result.to_pylist() == ['a', 'b', 'a']
assert result.dictionary.to_pylist() == ['a', 'b', 'c']
assert result.type.ordered is ordered
def test_take_null_type():
# ARROW-10027
arr = pa.array([None] * 10)
chunked_arr = pa.chunked_array([[None] * 5] * 2)
batch = pa.record_batch([arr], names=['a'])
table = pa.table({'a': arr})
indices = pa.array([1, 3, 7, None])
assert len(arr.take(indices)) == 4
assert len(chunked_arr.take(indices)) == 4
assert len(batch.take(indices).column(0)) == 4
assert len(table.take(indices).column(0)) == 4
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_filter(ty, values):
arr = pa.array(values, type=ty)
mask = pa.array([True, False, False, True, None])
result = arr.filter(mask, null_selection_behavior='drop')
result.validate()
assert result.equals(pa.array([values[0], values[3]], type=ty))
result = arr.filter(mask, null_selection_behavior='emit_null')
result.validate()
assert result.equals(pa.array([values[0], values[3], None], type=ty))
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
arr.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(ValueError, match="must all be the same length"):
arr.filter(mask)
def test_filter_chunked_array():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
expected_drop = pa.chunked_array([["a"], ["e"]])
expected_null = pa.chunked_array([["a"], [None, "e"]])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False, None], [False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = arr.filter(mask)
assert result.equals(expected_drop)
result = arr.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_record_batch():
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
# mask is array
mask = pa.array([True, False, None, False, True])
result = batch.filter(mask)
expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"])
assert result.equals(expected)
result = batch.filter(mask, null_selection_behavior="emit_null")
expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"])
assert result.equals(expected)
def test_filter_table():
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
expected_drop = pa.table([pa.array(["a", "e"])], names=["a"])
expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False], [None, False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = table.filter(mask)
assert result.equals(expected_drop)
result = table.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_errors():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
for obj in [arr, batch, table]:
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
obj.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(pa.ArrowInvalid,
match="must all be the same length"):
obj.filter(mask)
def test_filter_null_type():
# ARROW-10027
arr = pa.array([None] * 10)
chunked_arr = pa.chunked_array([[None] * 5] * 2)
batch = pa.record_batch([arr], names=['a'])
table = pa.table({'a': arr})
mask = pa.array([True, False] * 5)
assert len(arr.filter(mask)) == 5
assert len(chunked_arr.filter(mask)) == 5
assert len(batch.filter(mask).column(0)) == 5
assert len(table.filter(mask).column(0)) == 5
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_array(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr1 = con([1, 2, 3, 4, None])
arr2 = con([1, 1, 4, None, 4])
result = pc.equal(arr1, arr2)
assert result.equals(con([True, False, False, None, None]))
result = pc.not_equal(arr1, arr2)
assert result.equals(con([False, True, True, None, None]))
result = pc.less(arr1, arr2)
assert result.equals(con([False, False, True, None, None]))
result = pc.less_equal(arr1, arr2)
assert result.equals(con([True, False, True, None, None]))
result = pc.greater(arr1, arr2)
assert result.equals(con([False, True, False, None, None]))
result = pc.greater_equal(arr1, arr2)
assert result.equals(con([True, True, False, None, None]))
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_scalar(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr = con([1, 2, 3, None])
# TODO this is a hacky way to construct a scalar ..
scalar = pa.array([2]).sum()
result = pc.equal(arr, scalar)
assert result.equals(con([False, True, False, None]))
result = pc.not_equal(arr, scalar)
assert result.equals(con([True, False, True, None]))
result = pc.less(arr, scalar)
assert result.equals(con([True, False, False, None]))
result = pc.less_equal(arr, scalar)
assert result.equals(con([True, True, False, None]))
result = pc.greater(arr, scalar)
assert result.equals(con([False, False, True, None]))
result = pc.greater_equal(arr, scalar)
assert result.equals(con([False, True, True, None]))
def test_compare_chunked_array_mixed():
arr = pa.array([1, 2, 3, 4, None])
arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])
expected = pa.chunked_array([[True, True, True, True, None]])
for left, right in [
(arr, arr_chunked),
(arr_chunked, arr),
(arr_chunked, arr_chunked2),
]:
result = pc.equal(left, right)
assert result.equals(expected)
def test_arithmetic_add():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.add(left, right)
expected = pa.array([1, 1, 4, 6, 8])
assert result.equals(expected)
def test_arithmetic_subtract():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.subtract(left, right)
expected = pa.array([1, 3, 2, 2, 2])
assert result.equals(expected)
def test_arithmetic_multiply():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.multiply(left, right)
expected = pa.array([0, -2, 3, 8, 15])
assert result.equals(expected)
def test_is_null():
arr = pa.array([1, 2, 3, None])
result = arr.is_null()
result = arr.is_null()
expected = pa.array([False, False, False, True])
assert result.equals(expected)
assert result.equals(pc.is_null(arr))
result = arr.is_valid()
expected = pa.array([True, True, True, False])
assert result.equals(expected)
assert result.equals(pc.is_valid(arr))
arr = pa.chunked_array([[1, 2], [3, None]])
result = arr.is_null()
expected = pa.chunked_array([[False, False], [False, True]])
assert result.equals(expected)
result = arr.is_valid()
expected = pa.chunked_array([[True, True], [True, False]])
assert result.equals(expected)
def test_fill_null():
arr = pa.array([1, 2, None, 4], type=pa.int8())
fill_value = pa.array([5], type=pa.int8())
with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
arr.fill_null(fill_value)
arr = pa.array([None, None, None, None], type=pa.null())
fill_value = pa.scalar(None, type=pa.null())
result = arr.fill_null(fill_value)
expected = pa.array([None, None, None, None])
assert result.equals(expected)
arr = pa.array(['a', 'bb', None])
result = arr.fill_null('ccc')
expected = pa.array(['a', 'bb', 'ccc'])
assert result.equals(expected)
arr = pa.array([b'a', b'bb', None], type=pa.large_binary())
result = arr.fill_null('ccc')
expected = pa.array([b'a', b'bb', b'ccc'], type=pa.large_binary())
assert result.equals(expected)
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_array(arrow_type):
arr = pa.array([1, 2, None, 4], type=arrow_type)
fill_value = pa.scalar(5, type=arrow_type)
result = arr.fill_null(fill_value)
expected = pa.array([1, 2, 5, 4], type=arrow_type)
assert result.equals(expected)
# Implicit conversions
result = arr.fill_null(5)
assert result.equals(expected)
# ARROW-9451: Unsigned integers allow this for some reason
if not pa.types.is_unsigned_integer(arr.type):
with pytest.raises((ValueError, TypeError)):
arr.fill_null('5')
result = arr.fill_null(pa.scalar(5, type='int8'))
assert result.equals(expected)
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_chunked_array(arrow_type):
fill_value = pa.scalar(5, type=arrow_type)
arr = pa.chunked_array([pa.array([None, 2, 3, 4], type=arrow_type)])
result = arr.fill_null(fill_value)
expected = pa.chunked_array([pa.array([5, 2, 3, 4], type=arrow_type)])
assert result.equals(expected)
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([None, 4], type=arrow_type)
])
expected = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([5, 4], type=arrow_type)
])
result = arr.fill_null(fill_value)
assert result.equals(expected)
# Implicit conversions
result = arr.fill_null(5)
assert result.equals(expected)
result = arr.fill_null(pa.scalar(5, type='int8'))
assert result.equals(expected)
def test_logical():
a = pa.array([True, False, False, None])
b = pa.array([True, True, False, True])
assert pc.and_(a, b) == pa.array([True, False, False, None])
assert pc.and_kleene(a, b) == pa.array([True, False, False, None])
assert pc.or_(a, b) == pa.array([True, True, False, None])
assert pc.or_kleene(a, b) == pa.array([True, True, False, True])
assert pc.xor(a, b) == pa.array([False, True, False, None])
assert pc.invert(a) == pa.array([False, True, True, None])
def test_cast():
arr = pa.array([2**63 - 1], type='int64')
with pytest.raises(pa.ArrowInvalid):
pc.cast(arr, 'int32')
assert pc.cast(arr, 'int32', safe=False) == pa.array([-1], type='int32')
arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
expected = pa.array([1262304000000, 1420070400000], type='timestamp[ms]')
assert pc.cast(arr, 'timestamp[ms]') == expected
def test_strptime():
arr = pa.array(["5/1/2020", None, "12/13/1900"])
got = pc.strptime(arr, format='%m/%d/%Y', unit='s')
expected = pa.array([datetime(2020, 5, 1), None, datetime(1900, 12, 13)],
type=pa.timestamp('s'))
assert got == expected
def test_count():
arr = pa.array([1, 2, 3, None, None])
assert pc.count(arr).as_py() == 3
assert pc.count(arr, count_mode='count_non_null').as_py() == 3
assert pc.count(arr, count_mode='count_null').as_py() == 2
with pytest.raises(ValueError, match="'zzz' is not a valid count_mode"):
pc.count(arr, count_mode='zzz')
def test_partition_nth():
data = list(range(100, 140))
random.shuffle(data)
pivot = 10
indices = pc.partition_nth_indices(data, pivot=pivot).to_pylist()
assert len(indices) == len(data)
assert sorted(indices) == list(range(len(data)))
assert all(data[indices[i]] <= data[indices[pivot]]
for i in range(pivot))
assert all(data[indices[i]] >= data[indices[pivot]]
for i in range(pivot, len(data)))
|
xhochy/arrow
|
python/pyarrow/tests/test_compute.py
|
Python
|
apache-2.0
| 34,188 | 0 |
import base64
import json
import responses
from mapbox.services.datasets import Datasets
username = 'testuser'
access_token = 'pk.{0}.test'.format(
base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))
def test_class_attrs():
"""Get expected class attr values"""
serv = Datasets()
assert serv.api_name == 'datasets'
assert serv.api_version == 'v1'
def test_datasets_service_properties():
"""Get expected username and baseuri."""
datasets = Datasets(access_token=access_token)
assert datasets.username == username
assert datasets.baseuri == 'https://api.mapbox.com/datasets/v1'
@responses.activate
def test_datasets_list():
"""Listing datasets works"""
body = '''
[
{
"owner": "testuser",
"id": "ds1",
"created": "2015-09-19",
"modified": "2015-09-19"
},
{
"owner": "testuser",
"id": "ds2",
"created": "2015-09-19",
"modified": "2015-09-19"
}
]
'''
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format(
username, access_token),
match_querystring=True,
body=body, status=200,
content_type='application/json')
response = Datasets(access_token=access_token).list()
assert response.status_code == 200
assert [item['id'] for item in response.json()] == ['ds1', 'ds2']
@responses.activate
def test_datasets_create():
"""Creating a named and described dataset works."""
def request_callback(request):
payload = json.loads(request.body.decode())
resp_body = {
'owner': username,
'id': 'new',
'name': payload['name'],
'description': payload['description'],
'created': '2015-09-19',
'modified': '2015-09-19'}
headers = {}
return (200, headers, json.dumps(resp_body))
responses.add_callback(
responses.POST,
'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format(
username, access_token),
match_querystring=True,
callback=request_callback)
response = Datasets(access_token=access_token).create(
name='things', description='a collection of things')
assert response.status_code == 200
assert response.json()['name'] == 'things'
assert response.json()['description'] == 'a collection of things'
@responses.activate
def test_dataset_read():
"""Dataset name and description reading works."""
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
username, 'test', access_token),
match_querystring=True,
body=json.dumps(
{'name': 'things', 'description': 'a collection of things'}),
status=200,
content_type='application/json')
response = Datasets(access_token=access_token).read_dataset('test')
assert response.status_code == 200
assert response.json()['name'] == 'things'
assert response.json()['description'] == 'a collection of things'
@responses.activate
def test_dataset_update():
"""Updating dataset name and description works."""
def request_callback(request):
payload = json.loads(request.body.decode())
resp_body = {
'owner': username,
'id': 'foo',
'name': payload['name'],
'description': payload['description'],
'created': '2015-09-19',
'modified': '2015-09-19'}
headers = {}
return (200, headers, json.dumps(resp_body))
responses.add_callback(
responses.PATCH,
'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
username, 'foo', access_token),
match_querystring=True,
callback=request_callback)
response = Datasets(access_token=access_token).update_dataset(
'foo', name='things', description='a collection of things')
assert response.status_code == 200
assert response.json()['name'] == 'things'
assert response.json()['description'] == 'a collection of things'
@responses.activate
def test_delete_dataset():
"""Delete a dataset"""
responses.add(
responses.DELETE,
'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
username, 'test', access_token),
match_querystring=True,
status=204)
response = Datasets(access_token=access_token).delete_dataset('test')
assert response.status_code == 204
@responses.activate
def test_dataset_list_features():
"""Features retrieval work"""
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}'.format(
username, 'test', access_token),
match_querystring=True,
body=json.dumps({'type': 'FeatureCollection'}),
status=200,
content_type='application/json')
response = Datasets(access_token=access_token).list_features('test')
assert response.status_code == 200
assert response.json()['type'] == 'FeatureCollection'
@responses.activate
def test_dataset_list_features_reverse():
"""Features retrieval in reverse works"""
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}&reverse=true'.format(
username, 'test', access_token),
match_querystring=True,
body=json.dumps({'type': 'FeatureCollection'}),
status=200,
content_type='application/json')
response = Datasets(access_token=access_token).list_features(
'test', reverse=True)
assert response.status_code == 200
assert response.json()['type'] == 'FeatureCollection'
@responses.activate
def test_dataset_list_features_pagination():
"""Features retrieval pagination works"""
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}&start=1&limit=1'.format(
username, 'test', access_token),
match_querystring=True,
body=json.dumps({'type': 'FeatureCollection'}),
status=200,
content_type='application/json')
response = Datasets(access_token=access_token).list_features(
'test', start=1, limit=1)
assert response.status_code == 200
assert response.json()['type'] == 'FeatureCollection'
# Tests of feature-scoped methods.
@responses.activate
def test_read_feature():
"""Feature read works."""
responses.add(
responses.GET,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format(
username, 'test', '1', access_token),
match_querystring=True,
body=json.dumps({'type': 'Feature', 'id': '1'}),
status=200,
content_type='application/json')
response = Datasets(access_token=access_token).read_feature('test', '1')
assert response.status_code == 200
assert response.json()['type'] == 'Feature'
assert response.json()['id'] == '1'
@responses.activate
def test_update_feature():
"""Feature update works."""
def request_callback(request):
payload = json.loads(request.body.decode())
assert payload == {'type': 'Feature'}
return (200, {}, "")
responses.add_callback(
responses.PUT,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format(
username, 'test', '1', access_token),
match_querystring=True,
callback=request_callback)
response = Datasets(access_token=access_token).update_feature(
'test', '1', {'type': 'Feature'})
assert response.status_code == 200
@responses.activate
def test_delete_feature():
"""Deletes a feature."""
responses.add(
responses.DELETE,
'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format(
username, 'test', '1', access_token),
match_querystring=True,
status=204)
response = Datasets(access_token=access_token).delete_feature('test', '1')
assert response.status_code == 204
|
mapbox/mapbox-sdk-py
|
tests/test_datasets.py
|
Python
|
mit
| 8,145 | 0.000859 |
# encoding=utf-8
from tsundiary import app
app.jinja_env.globals.update(theme_nicename = {
'classic': 'Classic Orange',
'tsun-chan': 'Classic Orange w/ Tsundiary-chan',
'minimal': 'Minimal Black/Grey',
'misato-tachibana': 'Misato Tachibana',
'rei-ayanami': 'Rei Ayanami',
'rei-ayanami-2': 'Rei Ayanami 2',
'saya': 'Saya',
'yuno': 'Yuno Gasai',
'hitagi': 'Hitagi Senjougahara',
'kyoko-sakura': 'Kyoko Sakura',
'colorful': 'Based on favorite color'
})
app.jinja_env.globals.update(themes = ['classic', 'tsun-chan', 'minimal', 'misato-tachibana', 'rei-ayanami', 'rei-ayanami-2', 'saya', 'yuno', 'colorful'])
app.jinja_env.globals.update(theme_creds = {
'tsun-chan': 'Artist: <span title="<3">bdgtard</span>',
'misato-tachibana': 'Misato Tachibana source: Nichijou OP1.',
'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei source</a>.',
'saya': u'<a href="http://e-shuushuu.net/image/34277/">Saya source</a>. Artist: 中央東口 (Chuuou Higashiguchi).',
'yuno': '<a href="http://xyanderegirl.deviantart.com/art/Yuno-Gasai-Render-293856645">Yuno source</a>.',
'kyoko-sakura': '<a href="http://3071527.deviantart.com/art/kyoko-sakura-376238110">Kyoko source</a>.'
})
app.jinja_env.globals.update(theme_colors = [
('Red', '0,100,100'),
('Orange', '35,100,100'),
('Yellow', '50,100,100'),
('Green', '120,100,80'),
('Cyan', '180,100,80'),
('Blue', '215,100,100'),
('Purple', '270,100,100'),
('Black', '0,0,0'),
('Grey', '0,0,70'),
('White', '0,0,100'),
])
|
neynt/tsundiary
|
tsundiary/jinja_env.py
|
Python
|
mit
| 1,603 | 0.00815 |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for win32functions.py"""
import unittest
import sys
sys.path.append(".")
from pywinauto.win32structures import POINT # noqa: E402
from pywinauto.win32structures import RECT # noqa: E402
from pywinauto.win32functions import MakeLong, HiWord, LoWord # noqa: E402
class Win32FunctionsTestCases(unittest.TestCase):
"Unit tests for the win32function methods"
def testMakeLong(self):
data = (
(0, (0, 0)),
(1, (0, 1)),
(0x10000, (1, 0)),
(0xffff, (0, 0xffff)),
(0xffff0000, (0xffff, 0)),
(0xffffffff, (0xffff, 0xffff)),
(0, (0x10000, 0x10000)),
)
for result, (hi, lo) in data:
self.assertEqual(result, MakeLong(hi, lo))
def testMakeLong_zero(self):
"test that makelong(0,0)"
self.assertEqual(0, MakeLong(0, 0))
def testMakeLong_lowone(self):
"Make sure MakeLong() function works with low word == 1"
self.assertEqual(1, MakeLong(0, 1))
def testMakeLong_highone(self):
"Make sure MakeLong() function works with high word == 1"
self.assertEqual(0x10000, MakeLong(1, 0))
def testMakeLong_highbig(self):
"Make sure MakeLong() function works with big numder in high word"
self.assertEqual(0xffff0000, MakeLong(0xffff, 0))
def testMakeLong_lowbig(self):
"Make sure MakeLong() function works with big numder in low word"
self.assertEqual(0xffff, MakeLong(0, 0xffff))
def testMakeLong_big(self):
"Make sure MakeLong() function works with big numders in 2 words"
self.assertEqual(0xffffffff, MakeLong(0xffff, 0xffff))
def testLowWord_zero(self):
self.assertEqual(0, LoWord(0))
def testLowWord_one(self):
self.assertEqual(1, LoWord(1))
def testLowWord_big(self):
self.assertEqual(1, LoWord(MakeLong(0xffff, 1)))
def testLowWord_vbig(self):
self.assertEqual(0xffff, LoWord(MakeLong(0xffff, 0xffff)))
def testHiWord_zero(self):
self.assertEqual(0, HiWord(0))
def testHiWord_one(self):
self.assertEqual(0, HiWord(1))
def testHiWord_bigone(self):
self.assertEqual(1, HiWord(0x10000))
def testHiWord_big(self):
self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 1)))
def testHiWord_vbig(self):
self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 0xffff)))
def testPOINTindexation(self):
p = POINT(1, 2)
self.assertEqual(p[0], p.x)
self.assertEqual(p[1], p.y)
self.assertEqual(p[-2], p.x)
self.assertEqual(p[-1], p.y)
self.assertRaises(IndexError, lambda: p[2])
self.assertRaises(IndexError, lambda: p[-3])
def testPOINTiteration(self):
p = POINT(1, 2)
self.assertEqual([1, 2], [i for i in p])
def testPOINTcomparision(self):
"""Test POINT comparision operations"""
p0 = POINT(1, 2)
p1 = POINT(0, 2)
self.assertNotEqual(p0, p1)
p1.x = p0.x
self.assertEqual(p0, p1)
        # tuple comparison
self.assertEqual(p0, (1, 2))
self.assertNotEqual(p0, (0, 2))
        # wrong type comparison
self.assertNotEqual(p0, 1)
def test_RECT_hash(self):
"""Test RECT is hashable"""
r0 = RECT(0)
r1 = RECT(1)
d = { "r0": r0, "r1": r1 }
self.assertEqual(r0, d["r0"])
self.assertEqual(r1, d["r1"])
self.assertNotEqual(r0, r1)
def test_RECT_repr(self):
"""Test RECT repr"""
r0 = RECT(0)
self.assertEqual(r0.__repr__(), "<RECT L0, T0, R0, B0>")
if __name__ == "__main__":
unittest.main()
|
vasily-v-ryabov/pywinauto
|
pywinauto/unittests/test_win32functions.py
|
Python
|
bsd-3-clause
| 5,560 | 0.00054 |
import logging
import os
from ftplib import FTP as FTPClient
from paramiko import SFTPClient, Transport as SFTPTransport
ALLOWED_BACKEND_TYPES = ['ftp', 'sftp']
DEFAULT_BACKEND_TYPE = 'ftp'
from wok_hooks.misc import Configuration as _Configuration
class Configuration(_Configuration):
def __init__(self, path, **kwargs):
_Configuration.__init__(self, path, **kwargs)
if not 'type' in self or not self['type'] in ALLOWED_BACKEND_TYPES:
self['type'] = DEFAULT_BACKEND_TYPE
self.save()
class Observable:
def __init__(self, observer=None):
self._observer = []
if observer:
for item in observer:
self.register_observer(item)
def register_observer(self, observer):
self._observer.append(observer)
class Stateful(Observable):
def __init__(self, observer=None):
if not hasattr(self, '_state'):
self._state = None
Observable.__init__(self, observer)
if self._state is None:
raise NotImplementedError()
@property
def state(self):
return self._state
@state.setter
def state(self, value):
if value != self._state:
self._state = value
logging.info('%s is now %s' % (self, value))
self._raise_state_update()
def _raise_state_update(self):
for observer in self._observer:
observer.on_state_update(self)
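# Illustrative sketch, not part of the original module: any object exposing an
# on_state_update(stateful) method can be registered as an observer, e.g.
#
#     class LogObserver(object):
#         def on_state_update(self, stateful):
#             logging.info('backend is now %s', stateful.state)
#
#     backend = FTP(config)  # assumes a suitable Configuration instance
#     backend.register_observer(LogObserver())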
class FileBackend(Stateful):
STATE_DISCONNECTED = 'disconnected'
STATE_CONNECTED = 'connected'
class ConnectionException(Exception):
pass
def __init__(self, config, observer=None):
self.config = config
self._state = self.STATE_DISCONNECTED
Stateful.__init__(self, observer)
def file_create_folder(self, path):
raise NotImplementedError()
def put_file(self, path, file_handle):
raise NotImplementedError()
def get_metadata(self, path):
raise NotImplementedError()
def get_file_and_metadata(self, path):
raise NotImplementedError()
def get_root_path(self):
raise NotImplementedError()
def connect(self):
raise NotImplementedError()
def disconnect(self):
raise NotImplementedError()
class FTP(FileBackend):
def __init__(self, config):
FileBackend.__init__(self, config)
self._init_config()
self.session = None
self._init_session()
DEFAULT_CONFIG = {
'ftp_host': 'localhost',
'ftp_user': 'anonymous',
'ftp_password': '',
'ftp_output_path': ''}
def _init_config(self):
some_changes = False
if 'type' in self.config:
for option, value in FTP.DEFAULT_CONFIG.items():
if not option in self.config:
self.config[option] = value
some_changes = True
logging.info('set default ftp config.')
else:
self.config['type'] = 'ftp'
self.config.update(FTP.DEFAULT_CONFIG)
some_changes = True
logging.info('set default ftp config.')
if some_changes:
self.config.save()
def _init_session(self):
self.connect()
def connect(self):
self._authenticate()
self.state = self.STATE_CONNECTED
def _authenticate(self):
self.session = FTPClient(self.config['ftp_host'],
self.config['ftp_user'],
self.config['ftp_password'])
logging.info('FTP Authorization succeed')
def disconnect(self):
if self.session:
self.session.quit()
def file_create_folder(self, path):
if self.state == self.STATE_CONNECTED:
self.session.cwd('/')
dirlist = path.split('/')
while '' in dirlist:
dirlist.remove('')
previous = self.session.pwd()
for dirname in dirlist:
dir_contents = self.session.nlst(previous)
if not dirname in dir_contents:
self.session.mkd(dirname)
self.session.cwd(dirname)
previous += dirname + '/'
elif self.state == self.STATE_DISCONNECTED:
raise self.ConnectionException('FTP is %s' % self.state)
else:
raise NotImplementedError()
def put_file(self, path, file_handle):
if self.state == self.STATE_CONNECTED:
dirpath = '/'.join(path.split('/')[:-1])
self.file_create_folder(dirpath)
self.session.storbinary('STOR ' + path.split('/')[-1], file_handle)
elif self.state == self.STATE_DISCONNECTED:
raise self.ConnectionException('FTP is %s' % self.state)
else:
raise NotImplementedError()
def get_root_path(self):
raise NotImplementedError()
class SFTP(FileBackend):
def __init__(self, config):
FileBackend.__init__(self, config)
self._init_config()
self.session = None
self._init_session()
DEFAULT_CONFIG = {
'sftp_host': 'localhost',
'sftp_port': 22,
'sftp_user': 'anonymous',
'sftp_password': '',
'output_path': ''}
def _init_config(self):
some_changes = False
if 'type' in self.config:
for option, value in SFTP.DEFAULT_CONFIG.items():
if not option in self.config:
self.config[option] = value
some_changes = True
logging.info('set default sftp config.')
else:
self.config['type'] = 'sftp'
self.config.update(SFTP.DEFAULT_CONFIG)
some_changes = True
logging.info('set default sftp config.')
if some_changes:
self.config.save()
# cast config types
self.config['sftp_port'] = int(self.config['sftp_port'])
def _init_session(self):
self.connect()
def connect(self):
self._authenticate()
self.state = self.STATE_CONNECTED
def _authenticate(self):
self._transport = SFTPTransport((self.config['sftp_host'],
self.config['sftp_port']))
self._transport.connect(username=self.config['sftp_user'],
password=self.config['sftp_password'])
self.session = SFTPClient.from_transport(self._transport)
logging.info('SFTP Authorization succeed')
def disconnect(self):
self.session.close()
self._transport.close()
def file_create_folder(self, path):
if self.state == self.STATE_CONNECTED:
dirlist = path.split('/')
current_dirlist = ['']
missing_dirlist = []
current_dirlist.extend(dirlist[:])
while len(current_dirlist) > 0:
current_path = '/'.join(current_dirlist)
try:
self.session.chdir(current_path)
break
except:
missing_dirlist.append(current_dirlist.pop())
missing_dirlist.reverse()
for dirname in missing_dirlist:
dir_contents = self.session.listdir()
if not dirname in dir_contents:
self.session.mkdir(dirname)
logging.info('Create remote directory %s' % self.session.getcwd() + '/' + dirname)
self.session.chdir(dirname)
elif self.state == self.STATE_DISCONNECTED:
raise self.ConnectionException('SFTP is %s' % self.state)
else:
raise NotImplementedError()
def put_file(self, path, file_handle):
if self.state == self.STATE_CONNECTED:
dirpath = '/'.join(path.split('/')[:-1])
self.file_create_folder(dirpath)
try:
self.session.putfo(fl=file_handle, remotepath='/' + path)
logging.info('Create remote file %s' % '/' + path)
except Exception as ex:
logging.error(ex)
elif self.state == self.STATE_DISCONNECTED:
raise self.ConnectionException('SFTP is %s' % self.state)
else:
raise NotImplementedError()
def distribute_output(options, output_path=None):
if not output_path:
from wok.engine import Engine # @UnresolvedImport
import yaml
options = Engine.default_options.copy()
if os.path.isfile('config'):
with open('config') as f:
yaml_config = yaml.load(f)
if yaml_config:
options.update(yaml_config)
output_path = options['output_dir']
remote_server = None
try:
config = Configuration('distribute.config')
if config['type'] == 'ftp':
remote_server = FTP(config)
if config['type'] == 'sftp':
remote_server = SFTP(config)
for root, dirnames, filenames in os.walk(output_path, topdown=True):
for filename in filenames:
path = os.path.sep.join([root, filename])
file_handle = open(path, 'rb')
try:
remote_server.put_file(path.replace(output_path,
remote_server.config['output_path']),
file_handle)
except Exception as ex:
file_handle.close()
logging.error(ex)
raise ex
except Exception as ex:
logging.error(ex)
raise
finally:
remote_server.disconnect()
|
abbgrade/wok_hooks
|
wok_hooks/hook_distribute.py
|
Python
|
mit
| 9,715 | 0.001029 |
from datetime import datetime
from email.mime import text as mime_text
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch
import cauldron as cd
from cauldron.session import reloading
from cauldron.test import support
from cauldron.test.support import scaffolds
from cauldron.test.support.messages import Message
class TestSessionReloading(scaffolds.ResultsTest):
"""Test suite for the reloading module"""
def test_watch_bad_argument(self):
"""Should not reload a module"""
self.assertFalse(
reloading.refresh(datetime, force=True),
Message('Should not reload not a module')
)
def test_watch_good_argument(self):
"""Should reload the specified package/subpackage"""
self.assertTrue(
reloading.refresh('datetime', force=True),
Message('Should reload the datetime module')
)
def test_watch_not_needed(self):
"""Don't reload modules that haven't changed."""
support.create_project(self, 'betty')
project = cd.project.get_internal_project()
project.current_step = project.steps[0]
self.assertFalse(
reloading.refresh(mime_text),
Message('Expect no reload if the step has not been run before.')
)
support.run_command('run')
project.current_step = project.steps[0]
self.assertFalse(
reloading.refresh(mime_text),
Message('Expect no reload if module has not changed recently.')
)
def test_watch_recursive(self):
"""Should reload the email module."""
self.assertTrue(
reloading.refresh('email', recursive=True, force=True),
Message('Expected email module to be reloaded.')
)
def test_get_module_name(self):
"""Should get the module name from the name of its spec."""
target = MagicMock()
target.__spec__ = MagicMock()
target.__spec__.name = 'hello'
self.assertEqual('hello', reloading.get_module_name(target))
def test_get_module_name_alternate(self):
"""
Should get the module name from its dunder name if the spec name
does not exist.
"""
target = Mock(['__name__'])
target.__name__ = 'hello'
self.assertEqual('hello', reloading.get_module_name(target))
@patch('cauldron.session.reloading.os.path')
@patch('cauldron.session.reloading.importlib.reload')
def test_do_reload_error(self, reload: MagicMock, os_path: MagicMock):
"""Should fail to import the specified module and so return False."""
target = MagicMock()
target.__file__ = None
target.__path__ = ['fake']
os_path.getmtime.return_value = 10
reload.side_effect = ImportError('FAKE')
self.assertFalse(reloading.do_reload(target, 0))
self.assertEqual(1, reload.call_count)
@patch('cauldron.session.reloading.os.path')
@patch('cauldron.session.reloading.importlib.reload')
def test_do_reload(self, reload: MagicMock, os_path: MagicMock):
"""Should import the specified module and return True."""
target = MagicMock()
target.__file__ = 'fake'
os_path.getmtime.return_value = 10
self.assertTrue(reloading.do_reload(target, 0))
self.assertEqual(1, reload.call_count)
@patch('cauldron.session.reloading.os.path')
@patch('cauldron.session.reloading.importlib.reload')
def test_do_reload_skip(self, reload: MagicMock, os_path: MagicMock):
"""
Should skip reloading the specified module because it hasn't been
modified and return False.
"""
target = MagicMock()
target.__file__ = 'fake'
os_path.getmtime.return_value = 0
self.assertFalse(reloading.do_reload(target, 10))
self.assertEqual(0, reload.call_count)
def test_reload_children_module(self):
"""Should abort as False for a module that has no children."""
target = Mock()
reloading.reload_children(target, 10)
|
sernst/cauldron
|
cauldron/test/session/test_session_reloading.py
|
Python
|
mit
| 4,115 | 0 |
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
Copyright (C) 2012 Xycl
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmc, xbmcgui
import common
from urllib2 import Request, urlopen
from urllib import urlencode
from os.path import join,isfile,basename
import os
from traceback import print_exc
LABEL_TEXT = 100
BUTTON_CLOSE = 101
BUTTON_ZOOM_IN = 102
BUTTON_ZOOM_OUT = 103
GOOGLE_MAP = 200
CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, )
ACTION_SELECT_ITEM = 7
ACTION_MOUSE_START = 100
ACTION_TAB = 18
SELECT_ITEM = (ACTION_SELECT_ITEM, ACTION_MOUSE_START)
ACTION_DOWN = [4]
ACTION_UP = [3]
class GoogleMap( xbmcgui.WindowXMLDialog ):
def __init__( self, xml, cwd, default):
xbmcgui.WindowXMLDialog.__init__(self)
def onInit( self ):
self.setup_all('')
def onAction( self, action ):
# Close
if ( action.getId() in CANCEL_DIALOG or self.getFocusId() == BUTTON_CLOSE and action.getId() in SELECT_ITEM ):
self.close()
# Zoom in
elif ( action.getId() in SELECT_ITEM and self.getFocusId() == BUTTON_ZOOM_IN or action in ACTION_UP):
self.zoom('+')
# Zoom out
elif ( action.getId() in SELECT_ITEM and self.getFocusId() == BUTTON_ZOOM_OUT or action in ACTION_DOWN):
self.zoom('-')
def set_file(self, filename):
self.filename = filename
def set_place(self, place):
self.place = place
def set_datapath(self, datapath):
self.datapath = datapath
def set_pic(self, pic):
pass
def set_map(self, mapfile):
self.getControl( GOOGLE_MAP ).setImage(mapfile)
def setup_all( self, filtersettings = ""):
self.getControl( LABEL_TEXT ).setLabel( common.getstring(30220) )
self.getControl( BUTTON_CLOSE ).setLabel( common.getstring(30224) )
self.getControl( BUTTON_ZOOM_IN ).setLabel( common.getstring(30225) )
self.getControl( BUTTON_ZOOM_OUT ).setLabel( common.getstring(30226) )
self.zoomlevel = 15
self.zoom_max = 21
self.zoom_min = 0
self.load_map()
def zoom(self,way,step=1):
if way=="+":
self.zoomlevel = self.zoomlevel + step
elif way=="-":
self.zoomlevel = self.zoomlevel - step
else:
self.zoomlevel = step
if self.zoomlevel > self.zoom_max: self.zoomlevel = self.zoom_max
elif self.zoomlevel < self.zoom_min: self.zoomlevel = self.zoom_min
self.load_map()
def load_map(self):
#google geolocalisation
static_url = "http://maps.google.com/maps/api/staticmap?"
param_dic = {#location parameters (http://gmaps-samples.googlecode.com/svn/trunk/geocoder/singlegeocode.html)
"center":"", #(required if markers not present)
"zoom":self.zoomlevel, # 0 to 21+ (req if no markers
#map parameters
"size":"640x640", #widthxheight (required)
"format":"jpg", #"png8","png","png32","gif","jpg","jpg-baseline" (opt)
"maptype":"hybrid", #"roadmap","satellite","hybrid","terrain" (opt)
"language":"",
#Feature Parameters:
"markers" :"color:red|label:P|%s",#(opt)
#markers=color:red|label:P|lyon|12%20rue%20madiraa|marseille|Lille
#&markers=color:blue|label:P|Australie
"path" : "", #(opt)
"visible" : "", #(opt)
#Reporting Parameters:
"sensor" : "false" #is there a gps on system ? (req)
}
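        # Sketch of the resulting request: static_url + urlencode(param_dic), i.e.
        # roughly http://maps.google.com/maps/api/staticmap?zoom=15&size=640x640&
        # format=jpg&maptype=hybrid&markers=color:red|label:P|<place>&sensor=false
        # (urlencode percent-encodes the values; shown unencoded for readability).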
param_dic["markers"]=param_dic["markers"]%self.place
request_headers = { 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; fr; rv:1.9.2.10) Gecko/20100914 Firefox/3.6.10' }
request = Request(static_url+urlencode(param_dic), None, request_headers)
try:
urlfile = urlopen(request)
except:
dialog = xbmcgui.Dialog()
dialog.ok('XBMC Network Error', 'Google maps is not reachable')
self.close()
return
extension = urlfile.info().getheader("Content-Type","").split("/")[1]
filesize = int(urlfile.info().getheader("Content-Length",""))
mappath = xbmc.translatePath(self.datapath)
mapfile = join(self.datapath,basename(self.filename).split(".")[0]+"_maps%s."%self.zoomlevel+extension)
mapfile = xbmc.translatePath(mapfile)
# test existence of path
if not os.path.exists(mappath):
os.makedirs(mappath)
label = self.getControl( LABEL_TEXT )
if not isfile(mapfile):
#mapfile is not downloaded yet, download it now...
try:
#f=open(unicode(mapfile, 'utf-8'),"wb")
f=open(common.smart_unicode(mapfile), "wb")
except:
try:
f=open(common.smart_utf8(mapfile), "wb")
except:
print_exc()
#print "GEO Exception: "+mapfile
for i in range(1+(filesize/10)):
f.write(urlfile.read(10))
label.setLabel(common.getstring(30221)%(100*(float(i*10)/filesize)))#getting map... (%0.2f%%)
urlfile.close()
#pDialog.close()
try:
f.close()
except:
print_exc()
self.set_pic(self.filename)
self.set_map(mapfile)
label.setLabel(common.getstring(30222)%int(100*(float(self.zoomlevel)/self.zoom_max)))#Zoom level %s
|
Xycl/plugin.image.mypicsdb
|
resources/lib/googlemaps.py
|
Python
|
gpl-2.0
| 6,409 | 0.021688 |
from random import randint
import gobject
import clutter
import mxpy as mx
sort_set = False
filter_set = False
def sort_func(model, a, b, data):
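    # cmp-style comparator for ListModel.set_sort: orders colors by hue, the first
    # component returned by Color.to_hls().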
return int(a.to_hls()[0] - b.to_hls()[0])
def filter_func(model, iter, data):
color = iter.get(0)[0]
h = color.to_hls()[0]
return (h > 90 and h < 180)
def key_release_cb(stage, event, model):
from clutter import keysyms
global sort_set, filter_set
if event.keyval == keysyms.s:
if not sort_set:
model.set_sort(0, sort_func, None)
else:
model.set_sort(-1, None, None)
sort_set = not sort_set
elif event.keyval == keysyms.f:
if not filter_set:
model.set_filter(filter_func)
else:
model.set_filter(None, None)
filter_set = not filter_set
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
stage.set_color((255, 255, 255, 255))
stage.set_size(320, 240)
color = clutter.Color(0x0, 0xf, 0xf, 0xf)
scroll = mx.ScrollView()
scroll.set_size(*stage.get_size())
stage.add(scroll)
view = mx.ItemView()
scroll.add(view)
model = clutter.ListModel(clutter.Color, "color", float, "size")
for i in range(360):
color = clutter.color_from_hls(randint(0, 255), 0.6, 0.6)
color.alpha = 0xff
model.append(0, color, 1, 32.0)
view.set_model(model)
view.set_item_type(clutter.Rectangle)
view.add_attribute("color", 0)
view.add_attribute("width", 1)
view.add_attribute("height", 1)
stage.connect('key-release-event', key_release_cb, model)
stage.show()
clutter.main()
|
buztard/mxpy
|
examples/test-item-view.py
|
Python
|
lgpl-2.1
| 1,682 | 0.002973 |
# Copyright 2010-2011, Sikuli.org
# Released under the MIT License.
from org.sikuli.script import VDictProxy
import java.io.File
##
# VDict implements a visual dictionary that has Python's conventional dict
# interfaces.
#
# A visual dictionary is a data type for storing key-value pairs using
# images as keys. Using a visual dictionary, a user can easily automate
# the tasks of saving and retrieving arbitrary data objects by images.
# The syntax of the visual dictionary data type is modeled after that of
# the built-in Python dictionary data type.
class VDict(VDictProxy):
##
# the default similarity for fuzzy matching. The range of this is from
    # 0 to 1.0, where 0 matches everything and 1.0 does exact matching.
# <br/>
# The default similarity is 0.7.
_DEFAULT_SIMILARITY = 0.7
_DEFAULT_GET_ITEM_N = 0
##
# Constructs a new visual dictionary with the same mapping as the given dict.
#
def __init__(self, dict=None):
self._keys = {}
if dict:
for k in dict.keys():
self[k] = dict[k]
##
# Returns the number of keys in this visual dictionary.
#
def __len__(self):
return self.size()
##
# Maps the specified key to the specified item in this visual dictionary.
#
def __setitem__(self, key, item):
self.insert(key, item)
self._keys[key] = item
##
# Tests if the specified object looks like a key in this visual dictionary
# with the default similarity.
#
def __contains__(self, key):
return len(self.get(key)) > 0
##
# Returns all values to which the specified key is fuzzily matched in
# this visual dictionary with the default similarity.
# <br/>
# This is a wrapper for the {@link #VDict.get get} method.
def __getitem__(self, key):
return self.get(key)
##
# Deletes the key and its corresponding value from this visual dictionary.
#
def __delitem__(self, key):
self.erase(key)
del self._keys[key]
##
# Returns a list of the keys in this visual dictionary.
#
def keys(self):
return self._keys.keys()
##
# Returns the value to which the specified key is exactly matched in
# this visual dictionary.
#
def get_exact(self, key):
if key == None: return None
return self.lookup(key)
##
# Returns the values to which the specified key is fuzzily matched in
# this visual dictionary with the given similarity and the given maximum
# number of return items.
# @param similarity the similarity for matching.
# @param n maximum number of return items.
#
def get(self, key, similarity=_DEFAULT_SIMILARITY, n=_DEFAULT_GET_ITEM_N):
if key == None: return None
return self.lookup_similar_n(key, similarity, n)
##
# Returns the value to which the specified key is best matched in
# this visual dictionary with the given similarity.
# @param similarity the similarity for matching.
#
def get1(self, key, similarity=_DEFAULT_SIMILARITY):
if key == None: return None
return self.lookup_similar(key, similarity)
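# Illustrative usage sketch (not part of the original source; the image files and
# match results are assumptions):
#
#     d = VDict()
#     d["button.png"] = "settings"                  # an image used as the key
#     values = d["screenshot_button.png"]           # fuzzy lookup, similarity 0.7
#     best = d.get1("button.png", similarity=0.9)   # best single match
#     exact = d.get_exact("button.png")             # exact match only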
|
ck1125/sikuli
|
sikuli-script/src/main/python/sikuli/VDict.py
|
Python
|
mit
| 3,120 | 0.030128 |
import numpy as np
import matplotlib.pyplot as pl
class BasicHMC(object):
def __init__(self, model=None, verbose=True):
"""A basic HMC sampling object.
:params model:
An object with the following methods:
* lnprob(theta)
* lnprob_grad(theta)
* (optional) check_constrained
:params verbose:
bool, print lots of junk?
"""
self.verbose = verbose
self.model = model
self.has_bounds = hasattr(self.model, 'check_constrained')
self.set_mass_matrix()
def lnprob(self, theta):
return self.model.lnprob(theta)
def lnprob_grad(self, theta):
return self.model.lnprob_grad(theta)
def sample(self, initial, iterations=1, epsilon=None,
mass_matrix=None, length=10, sigma_length=0.0,
store_trajectories=False):
"""Sample for `iterations` trajectories (i.e., compute that many
trajectories, resampling the momenta at the end of each trajectory).
:params initial:
The initial position from which to start the sampling. ndarray of
shape (`ndim`,)
:param iterations:
The number of trajectories to compute. Integer.
:param epsilon: (optional, default: None)
The stepsize for the leapfrog integrator. Scalar float or ndarray
of shape (ndim,). If `None`, a scalar value will be crudely
estimated.
:param mass_matrix: (optional, default: None)
"Masses" in each dimension used to rescale the momentum vectors in
the HMC trajectories. Ideally this would be the inverse of the
covariance matrix of the posterior PDF. If `None` all masses will be
assumed 1. Otherwise can be ndarray of shape (ndim,) for a
diagonal covariance matrix or (ndim, ndim), in which case it must
be positive semi-definite.
:param length:
Number of leapfrog steps to take in each trajectory. Integer.
:param sigma_length: (optional, default: 0.0)
The dispersion in the length of each trajectory. If greater than
zero, the length of each trajectory will be drawn from a gaussian
with mean `length` and dispersion `sigma_length`
:param store_trajectories:
If `True`, store not just the endpoints of each trajectory but the
steps along each trajectory in a `trajectories` attribute.
"""
self.ndim = len(initial)
self.store_trajectories = store_trajectories
# set some initial values
self.set_mass_matrix(mass_matrix)
if epsilon is None:
epsilon = self.find_reasonable_stepsize(initial.copy())
print('using epsilon = {0}'.format(epsilon))
self.mu = np.log(10 * epsilon)
# set up the output
self.reset()
self.chain = np.zeros([iterations, self.ndim])
self.lnp = np.zeros([iterations])
self.accepted = np.zeros([iterations])
if self.store_trajectories:
self.trajectories = []
theta = initial.copy()
self.traj_num = 0
# loop over trajectories
lnp, grad = None, None # initial P and lnP are unknown
for i in xrange(int(iterations)):
ll = int(np.clip(np.round(np.random.normal(length, sigma_length)), 2, np.inf))
if self.verbose:
print('eps={:3.8f}, L={:5.0f}'.format(epsilon, ll))
info = self.trajectory(theta, epsilon, ll, lnP0=lnp, grad0=grad)
theta, lnp, grad, accepted = info
self.lnp[i] = lnp
self.chain[i, :] = theta
self.accepted[i] = accepted
self.traj_num += 1
return theta, lnp, grad
def trajectory(self, theta0, epsilon, length, lnP0=None, grad0=None):
"""Compute one trajectory for a given starting location, epsilon, and
length. The momenta in each direction are drawn from a gaussian before
performing 'length' leapfrog steps. If the trajectories attribute
exists, store the path of the trajectory.
:param theta0:
Starting position, ndarray of shape (ndim,)
:param epsilon:
Stepsize(s) to use for this trajectory. scalar float or ndarray of shape (ndim,)
:param length:
The length of this trajectory, integer.
:param lnP0: optional
The lnprob value of the initial position (can be used to save a call to lnprob)
:param grad0: optional
The gradients of the lnprob function at `theta0`, ndarray of shape (ndim,)
:returns theta:
The final position vector, which if the trajectory was not accepted
will be equal to the initial position. ndarray of shape (ndim,)
:returns lnP:
The ln-probability at the final position, float.
:returns grad:
The gradient of the ln-probability at the final position, ndarray of shape (ndim,)
:returns accepted:
Whether the trajectory was accepted (1.0) or not (0.0)
"""
if self.store_trajectories:
self.trajectories.append(np.zeros([length, self.ndim]))
# --- Set up for the run ----
# save initial position
theta = theta0.copy()
# random initial momenta
p0 = self.draw_momentum()
# gradient in U at initial position, negative of gradient lnP
if grad0 is None:
grad0 = -self.lnprob_grad(theta0)
if lnP0 is None:
lnP0 = self.lnprob(theta0)
# use copies of initial momenta and gradient
p, grad = p0.copy(), grad0.copy()
# --- Compute Trajectory ---
# do 'length' leapfrog steps along the trajectory (and store?)
for step in xrange(int(length)):
theta, p, grad = self.leapfrog(theta, p, epsilon, grad,
check_oob=self.has_bounds)
if self.store_trajectories:
self.trajectories[-1][step, :] = theta
# ---- Accept/Reject ---
# Odds ratio of the proposed move
lnP = self.lnprob(theta)
# change in potential = negative change in lnP
dU = lnP0 - lnP
# change in kinetic
dK = self.kinetic_energy(p) - self.kinetic_energy(p0)
# acceptance criterion
alpha = np.exp(-dU - dK)
if self.verbose:
print('H={0}, dU={1}, dK={2}'.format(alpha, dU, dK))
# Accept or reject
if np.random.uniform(0, 1) < alpha:
accepted = 1.0
return theta, lnP, grad, accepted
else:
accepted = 0.0
return theta0, lnP0, grad0, accepted
def leapfrog(self, q, p, epsilon, grad, check_oob=False):
"""Perfrom one leapfrog step, updating the momentum and position
vectors. This uses one call to the model.lnprob_grad() function, which
must be defined. It also performs an optional check on the value of the
        new position to make sure it satisfies any parameter constraints, for
which the check_constrained method of model is called.
"""
# half step in p
p -= 0.5 * epsilon * grad
# full step in theta
q += epsilon * self.velocity(p)
# check for constraints on theta
while check_oob:
q, sign, check_oob = self.model.check_constrained(q)
p *= sign # flip the momentum if necessary
# compute new gradient in U, which is negative of gradient in lnP
grad = -self.lnprob_grad(q)
# another half step in p
p -= 0.5 * epsilon * grad
return q, p, grad
def draw_momentum(self):
if self.ndim_mass == 0:
p = np.random.normal(0, 1, self.ndim)
elif self.ndim_mass == 1:
p = np.random.normal(0, np.sqrt(self.mass_matrix))
else:
p = np.random.multivariate_normal(np.zeros(self.ndim), self.mass_matrix)
return p
def velocity(self, p):
"""Get the velocities given a momentum vector.
"""
if self.ndim_mass == 0:
v = p # Masses all = 1
elif self.ndim_mass == 1:
v = self.inverse_mass_matrix * p
#v = p
else:
#v = np.dot(self.cho_factor, p)
v = np.dot(self.inverse_mass_matrix, p)
return v
def kinetic_energy(self, p):
"""Get the kinetic energy given momenta
"""
if self.ndim_mass == 0:
K = np.dot(p, p)
elif self.ndim_mass == 1:
K = np.dot(p, self.inverse_mass_matrix * p)
else:
K = np.dot(p.T, np.dot(self.inverse_mass_matrix, p))
return 0.5 * K
def set_mass_matrix(self, mass_matrix=None):
"""Cache the inverse of the mass matrix, and set a flag for the
dimensionality of the mass matrix. Instead of flags that control
operation through branch statements, should probably use subclasses for
different types of mass matrix.
"""
self.mass_matrix = mass_matrix
if mass_matrix is None:
self.inverse_mass_matrix = 1
self.ndim_mass = 0
elif mass_matrix.ndim == 1:
self.inverse_mass_matrix = 1. / mass_matrix
self.ndim_mass = 1
elif mass_matrix.ndim == 2:
self.inverse_mass_matrix = np.linalg.inv(mass_matrix)
self.ndim_mass = 2
print(mass_matrix, self.ndim_mass)
def langevin(self):
"""Special case of length = 1 trajectories"""
raise(NotImplementedError)
def find_reasonable_stepsize(self, q0, epsilon_guess=1.0):
"""Estimate a reasonable value of the stepsize
"""
epsilon = epsilon_guess
lnP0, grad0 = self.lnprob(q0.copy()), self.lnprob_grad(q0.copy())
p0 = self.draw_momentum()
condition, a, i = True, 0, 0
while condition:
p = p0.copy()
epsilon = 2.**a * epsilon
qprime, pprime, gradprime = self.leapfrog(q0.copy(), p, epsilon, grad0,
check_oob=self.has_bounds)
lnP = self.lnprob(qprime)
# change in potential
dU = lnP0 - lnP
# change in kinetic
dK = self.kinetic_energy(pprime) - self.kinetic_energy(p0)
alpha = np.exp(-dU - dK)
if a == 0: # this is the first try
a = 2 * (alpha > 0.5) - 1.0 # direction to change epsilon in the future, + or -
condition = (alpha**a) > (2**(-a))
i += 1
print(i, epsilon, alpha)
            if alpha == 0.0:
raise ValueError('alpha is 0')
return epsilon
def reset(self):
# use this to keep track of the trajectory number within the trajectory
# (for storage)
self.traj_num = 0
class TestModel(object):
"""A simple correlated normal distribution to sample.
"""
def __init__(self, Sigma=None):
if Sigma is None:
Sigma = np.array([[1., 1.8], [1.8, 4.]])
self.A = np.linalg.inv(Sigma)
self.has_constraints = False
def lnprob_grad(self, theta):
return -np.dot(self.A, theta)
def lnprob(self, theta):
return 0.5 * np.dot(theta.T, self.lnprob_grad(theta))
class MixModel(object):
"""A simple line in 2-d space (but constrained) to sample.
"""
def __init__(self):
self.A = np.array([10., 20.])
# constraints
self.lower = 0.
self.upper = 10.
def model(self, theta):
# super simple model
return (self.A * theta).sum()
def lnprob(self, theta):
# probability of that simple model given observations (which must be defined)
return -0.5 * ((self.model(theta) - self.obs)**2 /
self.obs_unc**2).sum()
def lnprob_grad(self, theta):
# with simple gradients of the probbility
grad = -(self.model(theta)-self.obs)/self.obs_unc**2 * self.A
return grad
def check_constrained(self, theta):
"""Method that checks the value of theta against constraints.
If theta is above or below the boundaries, the sign of the momentum
is flipped and theta is adjusted as if the trajectory had
bounced off the constraint. Returns the new theta vector, a
vector of multiplicative signs for the momenta, and a flag for
if the values are still out of bounds.
"""
# initially no flips
sign = np.ones_like(theta)
oob = True # pretend we started out-of-bounds to force at least one check
#print('theta_in ={0}'.format(theta))
while oob:
above = theta > self.upper
theta[above] = 2*self.upper - theta[above]
sign[above] *= -1
below = theta < self.lower
theta[below] = 2*self.lower - theta[below]
sign[below] *= -1
oob = np.any(below | above)
#print('theta_out ={0}'.format(theta))
return theta, sign, oob
def test_mix_hmc(epsilon=0.2, length=10, iterations=100, snr=10):
"""Sample the mixing model using hmc, and plot the results.
"""
model = MixModel()
D = 2
#generate the mock
mock_theta = np.random.uniform(1, 5, D)
#print('mock_theta={0}'.format(mock_theta))
mock = model.model(mock_theta)
noised_mock = mock * (1 + np.random.normal(0, 1, 1) / snr)
noise = mock/snr
#add the mock to the model
model.obs = noised_mock
model.obs_unc = noise
theta0 = np.random.uniform(0, 10, D)
#initialize sampler and sample
sampler = BasicHMC(model, verbose=False)
pos, prob, eps = sampler.sample(theta0, iterations=iterations,
epsilon=epsilon, length=length,
store_trajectories=True)
    print(mock_theta / np.mean(pos, axis=0))
print('mock_theta = {0}'.format(mock_theta))
#plot trajectories
pl.figure(1)
pl.clf()
color = ['red', 'blue']
pl.plot(sampler.chain[::10, 0], sampler.chain[::10, 1], '.', label='Thinned samples')
for it in np.arange(20) + int(iterations/3):
        pl.plot(sampler.trajectories[it][:, 0],
                sampler.trajectories[it][:, 1],
                color=color[int(sampler.accepted[it])])
pl.plot(mock_theta[0], mock_theta[1], 'g.', markersize=20, label='Truth (noiseless)')
pl.plot(theta0[0], theta0[1], 'c.', markersize=15, label='Initial')
pl.legend(loc='upper right')
pl.title(r'$Z = \theta^T x$, $\theta >0$, $\epsilon = ${0}, Length = {1}, $f_{{accept}} =$ {2}'.format(epsilon, length, sampler.accepted.sum()/iterations))
pl.xlabel(r'$\theta_1$')
pl.ylabel(r'$\theta_2$')
pl.show()
return sampler
def test_hmc(verbose=False, Sigma=None, **sample_kwargs):
"""sample the correlated normal using hmc"""
model = TestModel(Sigma=Sigma)
D = 2
theta0 = np.random.normal(0, 1, D)
sampler = BasicHMC(model, verbose=verbose)
pos, prob, eps = sampler.sample(theta0.copy(), **sample_kwargs)
print(theta0)
print(np.std(sampler.chain, axis=0))
print(sampler.accepted.mean())
return sampler
|
bd-j/hmc
|
hmc.py
|
Python
|
gpl-2.0
| 15,392 | 0.001559 |
#!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# infinitumd and infinitum-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# infinitum-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
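# Illustrative usage, not part of the original script:
#
#     cppfilt = CPPFilt()
#     print(cppfilt(b'_ZNSt8ios_base4InitC1Ev'))  # -> std::ios_base::Init::Init()
#     cppfilt.close()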
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
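    # The parsing below assumes 'readelf --dyn-syms -W' rows of roughly this shape
    # (illustrative):
    #     2: 0000000000000000   0 FUNC  GLOBAL DEFAULT UND memcpy@GLIBC_2.14 (3)
    # i.e. tokens[7] holds 'symbol@version' and tokens[6] is 'UND' for imports.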
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
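    # Worked example (illustrative): version == b'GLIBC_2.11' splits into
    # lib == b'GLIBC' and ver == (2, 11), which is compared against max_versions[lib].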
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
|
fcecin/infinitum
|
contrib/devtools/symbol-check.py
|
Python
|
mit
| 6,197 | 0.011457 |
from __future__ import unicode_literals
__all__ = (
'Key',
'Keys',
)
class Key(object):
def __init__(self, name):
#: Descriptive way of writing keys in configuration files. e.g. <C-A>
#: for ``Control-A``.
self.name = name
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.name)
class Keys(object):
Escape = Key('<Escape>')
ControlA = Key('<C-A>')
ControlB = Key('<C-B>')
ControlC = Key('<C-C>')
ControlD = Key('<C-D>')
ControlE = Key('<C-E>')
ControlF = Key('<C-F>')
ControlG = Key('<C-G>')
ControlH = Key('<C-H>')
ControlI = Key('<C-I>') # Tab
ControlJ = Key('<C-J>') # Enter
ControlK = Key('<C-K>')
ControlL = Key('<C-L>')
ControlM = Key('<C-M>') # Enter
ControlN = Key('<C-N>')
ControlO = Key('<C-O>')
ControlP = Key('<C-P>')
ControlQ = Key('<C-Q>')
ControlR = Key('<C-R>')
ControlS = Key('<C-S>')
ControlT = Key('<C-T>')
ControlU = Key('<C-U>')
ControlV = Key('<C-V>')
ControlW = Key('<C-W>')
ControlX = Key('<C-X>')
ControlY = Key('<C-Y>')
ControlZ = Key('<C-Z>')
ControlSpace = Key('<C-Space>')
ControlBackslash = Key('<C-Backslash>')
ControlSquareClose = Key('<C-SquareClose>')
ControlCircumflex = Key('<C-Circumflex>')
ControlUnderscore = Key('<C-Underscore>')
ControlLeft = Key('<C-Left>')
ControlRight = Key('<C-Right>')
ControlUp = Key('<C-Up>')
ControlDown = Key('<C-Down>')
Up = Key('<Up>')
Down = Key('<Down>')
Right = Key('<Right>')
Left = Key('<Left>')
Home = Key('<Home>')
End = Key('<End>')
Delete = Key('<Delete>')
ShiftDelete = Key('<ShiftDelete>')
PageUp = Key('<PageUp>')
PageDown = Key('<PageDown>')
BackTab = Key('<BackTab>') # shift + tab
Tab = ControlI
Backspace = ControlH
F1 = Key('<F1>')
F2 = Key('<F2>')
F3 = Key('<F3>')
F4 = Key('<F4>')
F5 = Key('<F5>')
F6 = Key('<F6>')
F7 = Key('<F7>')
F8 = Key('<F8>')
F9 = Key('<F9>')
F10 = Key('<F10>')
F11 = Key('<F11>')
F12 = Key('<F12>')
F13 = Key('<F13>')
F14 = Key('<F14>')
F15 = Key('<F15>')
F16 = Key('<F16>')
F17 = Key('<F17>')
F18 = Key('<F18>')
F19 = Key('<F19>')
F20 = Key('<F20>')
# Matches any key.
Any = Key('<Any>')
# Special
CPRResponse = Key('<Cursor-Position-Response>')
|
jaseg/python-prompt-toolkit
|
prompt_toolkit/keys.py
|
Python
|
bsd-3-clause
| 2,546 | 0.007855 |
from django.core.urlresolvers import resolve, Resolver404
from django.test import TestCase
from conman.routes import views
class RouteRouterViewTest(TestCase):
"""Test the route_router view."""
def assert_url_uses_router(self, url):
"""Check a url resolves to the route_router view."""
resolved_view = resolve(url)
self.assertEqual(resolved_view.func, views.route_router)
def test_blank_url(self):
"""Blank urls should not resolve.
This is actually a test of django, as urls must start with `/`.
"""
with self.assertRaises(Resolver404):
self.assert_url_uses_router('')
def test_double_slash_url(self):
"""Trailing slashes should trail something."""
with self.assertRaises(Resolver404):
self.assert_url_uses_router('//')
def test_root_url(self):
"""The root url is resolved using views.route_router."""
self.assert_url_uses_router('/')
def test_child_url(self):
"""A child url is resolved using views.route_router."""
self.assert_url_uses_router('/slug/')
def test_nested_child_url(self):
"""A nested child url is resolved using views.route_router."""
self.assert_url_uses_router('/foo/bar/')
def test_numerical_url(self):
"""A numeric url is resolved using views.route_router."""
self.assert_url_uses_router('/meanings/42/')
def test_without_trailing_slash(self):
"""A url without a trailing slash is not resolved by views.route_router."""
with self.assertRaises(Resolver404):
self.assert_url_uses_router('/fail')
|
meshy/django-conman
|
tests/routes/test_urls.py
|
Python
|
bsd-2-clause
| 1,649 | 0.000606 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', 'apps.neo_graph_test.views.create_graph', name='create_graph'),
url(r'^', include('apps.citizens.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
levithomason/neo
|
apps/neo_graph_test/urls.py
|
Python
|
mit
| 388 | 0.002577 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import re
import os
from urllib.request import urlretrieve
from urllib.request import urlopen
from urllib.request import build_opener, HTTPCookieProcessor
from urllib.parse import urlencode, quote
from http.cookiejar import CookieJar
from configparser import SafeConfigParser
from imghdr import what
from bs4 import BeautifulSoup
from PIL import Image
import pymysql
from subprocess import Popen, PIPE
from mvdl import *
from pixivpy3 import *
dlDir = "./images/"
dlDir_mov = "./mov/"
thumbDir = "./images/thumbnail/"
thumb_lDir = "./images/thumbnail_l/"
def thumbnail(input_file, output_file):
size = 150
img = Image.open(input_file)
w,h = img.size
l,t,r,b = 0,0,size,size
new_w, new_h = size,size
if w>=h:
new_w = size * w // h
l = (new_w - size) // 2
r = new_w - l
else:
new_h = size * h // w
t = (new_h - size) // 2
b = new_h - t
thu = img.resize((new_w, new_h), Image.ANTIALIAS)
thu = thu.crop((l,t,r,b))
thu.save(thumbDir + output_file, quality=100, optimize=True)
thu = img.resize((w*300//h, 300), Image.ANTIALIAS)
thu.save(thumb_lDir + output_file, quality=100, optimize=True)
def regImg(loc, orig, thum, type, mov=0):
nick = ""
channel = ""
if len(sys.argv) == 4:
nick = os.fsencode(sys.argv[2]).decode('utf-8')
channel = os.fsencode(sys.argv[3]).decode('utf-8')
conn = pymysql.connect(host='127.0.0.1',user='maobot',
passwd='msc3824',db='maobot',charset='utf8')
cur = conn.cursor()
if mov == 0:
statement = "INSERT INTO images (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)"
elif mov == 1:
statement = "INSERT INTO movies (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)"
data = (nick, channel, loc, orig, thum, type)
cur.execute(statement, data)
cur.connection.commit()
cur.close()
conn.close()
def readConfig():
config = SafeConfigParser()
if os.path.exists('imgdl.ini'):
config.read('imgdl.ini')
else:
print("No Configuration File.")
sys.exit(2)
try:
nicouser = config.get('nicoseiga.jp', 'user')
nicopass = config.get('nicoseiga.jp', 'pass')
except Exception as e:
return "error: could not read nico configuration." + e
try:
pixiuser = config.get('pixiv.net', 'user')
pixipass = config.get('pixiv.net', 'pass')
except Exception as e:
return "error: could not read pixiv configuration." + e
return nicouser, nicopass, pixiuser, pixipass
def main():
orig_url = sys.argv[1]
html = urlopen(orig_url)
nicouser, nicopass, pixiuser, pixipass = readConfig()
bsObj = BeautifulSoup(html, "lxml")
twi = re.compile('https:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/\d+')
nic = re.compile('http:\/\/seiga.nicovideo.jp\/seiga\/[a-zA-Z0-9]+')
pix1 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=medium\&illust_id=[0-9]+')
pix2 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?illust_id=[0-9]+\&mode=medium')
pix_ = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=[0-9]+\&page=[0-9]+')
nico_mov = re.compile('https?:\/\/www.nicovideo.jp\/watch\/[a-zA-Z0-9]+')
yout_mov = re.compile('https:\/\/www.youtube.com\/watch\?v=[a-zA-Z0-9]+')
image_format = ["jpg", "jpeg", "gif", "png"]
if twi.match(orig_url):
images = bsObj.find("div", {"class": "permalink-tweet-container"}).find("div", {"class": "AdaptiveMedia-container"}).findAll("div", {"class": "AdaptiveMedia-photoContainer"})
for item in images:
imageLoc = item.find("img")["src"]
urlretrieve(imageLoc , dlDir + "twi" + imageLoc[28:])
loc = dlDir+"twi"+imageLoc[28:]
thumb = "thumb_twi" + imageLoc[28:]
type = what(loc)
thumbnail(loc, thumb)
regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
print(thumb_lDir+thumb)
elif nic.match(orig_url):
opener = build_opener(HTTPCookieProcessor(CookieJar()))
post = {
'mail_tel': nicouser,
'password': nicopass
}
data = urlencode(post).encode("utf_8")
response = opener.open('https://secure.nicovideo.jp/secure/login', data)
response.close()
image_id = orig_url[34:]
with opener.open('http://seiga.nicovideo.jp/image/source?id=' + image_id) as response:
bsObj = BeautifulSoup(response)
imageLoc = bsObj.find("div", {"class": "illust_view_big"}).find("img")["src"]
dlLoc = dlDir + "nic" + image_id
urlretrieve('http://lohas.nicoseiga.jp' + imageLoc, dlLoc)
type = what(dlLoc)
loc = dlLoc + "." + type
os.rename(dlLoc, loc)
thumb = "thumb_nico"+image_id+"."+type
print(thumb_lDir+thumb)
thumbnail(loc, thumb)
regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
elif pix1.match(orig_url) or pix2.match(orig_url):
imageLocs = []
image_id = re.search('\d+', orig_url).group()
api = AppPixivAPI()
api.login(pixiuser, pixipass)
json_result = api.illust_detail(image_id, req_auth=True)
illust = json_result.illust
if "original" in illust.image_urls:
imageLocs.append(illust.image_urls.original)
elif "meta_pages" in illust and len(illust.meta_pages)!=0:
for i in illust.meta_pages:
imageLocs.append(i.image_urls.original)
elif "meta_single_page" in illust:
imageLocs.append(illust.meta_single_page.original_image_url)
# print(imageLocs)
for imageLoc in imageLocs:
api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1])
loc = dlDir + "pix" + imageLoc.split("/")[-1]
type = what(loc)
thumb = "thumb_pix"+imageLoc.split("/")[-1]
thumbnail(loc, thumb)
regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
print(thumb_lDir+thumb)
elif pix_.match(orig_url):
imageLocs = []
reg = re.compile("https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=(\d+)\&page=(\d+)")
image_id = int(reg.match(orig_url).group(1))
page = int(reg.match(orig_url).group(2))
api = AppPixivAPI()
api.login(pixiuser, pixipass)
json_result = api.illust_detail(image_id, req_auth=True)
imageLocs.append(json_result.illust.meta_pages[page].image_urls.original)
for imageLoc in imageLocs:
api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1])
loc = dlDir + "pix" + imageLoc.split("/")[-1]
type = what(loc)
thumb = "thumb_pix"+imageLoc.split("/")[-1]
thumbnail(loc, thumb)
regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
print(thumb_lDir+thumb)
elif nico_mov.match(orig_url):
proc = Popen(["./mvdl.py", orig_url], stdout=PIPE, stderr=PIPE)
retcode = proc.poll()
elif orig_url.split(".")[-1] in image_format:
filename = "_".join(quote(orig_url).split("/")[-2:])
if len(filename) > 10:
from datetime import datetime
filename = datetime.now().strftime('%s') + filename[-10:]
loc = dlDir + filename
thumb = "thumb_"+filename
urlretrieve(orig_url , loc)
type = what(loc)
if type == None:
type = orig_url.split(".")[-1]
thumbnail(loc, thumb)
print(thumb_lDir+thumb)
regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
if __name__ == '__main__' :
main()
|
utopianf/maobot_php
|
imgdl.py
|
Python
|
mit
| 7,849 | 0.010957 |
import Gears as gears
from .. import *
from ..Pif.Base import *
class Min(Base) :
def applyWithArgs(
self,
spass,
functionName,
*,
pif1 : 'First operand. (Pif.*)'
= Pif.Solid( color = 'white' ),
pif2 : 'Second operand. (Pif.*)'
= Pif.Solid( color = 'white' )
) :
stimulus = spass.getStimulus()
pif1.apply(spass, functionName + '_op1')
pif2.apply(spass, functionName + '_op2')
spass.setShaderFunction( name = functionName, src = self.glslEsc( '''
vec3 @<pattern>@ (vec2 x, float time){
return min( @<pattern>@_op1(x), @<pattern>@_op2(x) );
}
''').format( pattern=functionName ) )
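        # For a functionName of e.g. 'pattern', the emitted GLSL is roughly (sketch):
        #   vec3 pattern(vec2 x, float time){ return min(pattern_op1(x), pattern_op2(x)); }
        # i.e. the component-wise minimum of the two operand pattern functions.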
|
szecsi/Gears
|
GearsPy/Project/Components/Composition/Min.py
|
Python
|
gpl-2.0
| 828 | 0.03744 |
from biohub.core.plugins import PluginConfig
class BadPluginConfig(PluginConfig):
name = 'tests.core.plugins.bad_plugin'
title = 'My Plugin'
author = 'hsfzxjy'
description = 'This is my plugin.'
def ready(self):
raise ZeroDivisionError
|
igemsoftware2017/USTC-Software-2017
|
tests/core/plugins/bad_plugin/apps.py
|
Python
|
gpl-3.0
| 268 | 0 |
from util.tipo import tipo
class S_PARTY_MEMBER_INTERVAL_POS_UPDATE(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
|
jeff-alves/Tera
|
game/message/unused/S_PARTY_MEMBER_INTERVAL_POS_UPDATE.py
|
Python
|
mit
| 246 | 0.012195 |
from tkinter import *
import mysql.connector as mysql
from MySQLdb import dbConnect
from HomeOOP import *
import datetime
from PIL import Image, ImageTk
class MainMenu(Frame):
def __init__(self, parent): #The very first screen of the web app
Frame.__init__(self, parent)
w, h = parent.winfo_screenwidth(), parent.winfo_screenheight()
#parent.overrideredirect(1)
parent.geometry("%dx%d+0+0" % (w, h))
frame = Frame(parent, width=w, height=h).place(x=350, y=450)
# frame.pack(expand=True)
# canvas = Canvas(parent, width=w, height=h)
# scale_width = w / 3900
# scale_height = h / 2613
web = "https://raw.githubusercontent.com/ACBL-Bridge/Bridge-Application/master/Login/"
URL = "login_background_resized.jpg"
u = urlopen(web + URL)
raw_data = u.read()
u.close()
im = Image.open(BytesIO(raw_data))
bckgrd = ImageTk.PhotoImage(im)
login_bckgrd = Label(frame, image=bckgrd)
login_bckgrd.image = bckgrd
login_bckgrd.place(x=0, y=0, relwidth=1, relheight=1)
titleLabel = Label(frame, text="LET'S PLAY BRIDGE", fg="black", font='Arial 36')
titleLabel.pack(side="top", pady=150)
loginButton = Button(frame, text="Existing User", fg="blue", font="Arial 14", command=lambda: self.LoginScreen(parent))
loginButton.pack(side='top')
signupButton = Button(frame, text="Sign up", fg="blue", font="Arial 14", command=self.SignupScreen)
signupButton.pack(side="top")
quitButton = Button(frame, text="Quit", font="Arial 14", command=self.SignupScreen)
quitButton.pack(side="top")
####################################Login - GUI ###########################
def LoginScreen(self,parent):
global entry_user
global entry_pass
top = Toplevel(self)
top.title("Log In - ABCL")
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("550x400+%d+%d" % (w/2-275, h/2-125)) #250
#top.configure(background = 'white')
quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
#entry_user = StringVar()
#entry_pass = StringVar()
# Frames to divide the window into three parts.. makes it easier to organize the widgets
topFrame = Frame(top)
topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=50)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
# Widgets and which frame they are in
#label = Label(topFrame, text="LET'S PLAY BRIDGE")
userLabel = Label(middleFrame, text='Username:', font="Arial 14")
passLabel = Label(middleFrame, text='Password:', font="Arial 14")
entry_user = Entry(middleFrame) # For DB
entry_pass = Entry(middleFrame, show ='*') # For DB
b = Button(bottomFrame, text="Log In",fg ="blue", font ="Arial 14", command=lambda: get_Login_input(self, parent))
#Location of the Widgets in their frames
#label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
userLabel.grid(row=10, column=0, sticky=W, padx=20)
entry_user.grid(row=10, column=1, padx=20)
passLabel.grid(row=11, column=0, sticky=W, padx=20)
entry_pass.grid(row=11, column=1, padx=20)
b.grid(row=12, columnspan=2)
###############################################DATABASE Check Login!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def go_to_HomePage(user):
root = Tk()
app = Home(root,user)
root.mainloop()
def get_Login_input(self, parent):
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
cur.execute("SELECT username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
rows = cur.fetchall()
if rows:
cur.execute("SELECT firstname, lastname, username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
for namerow in cur.fetchall(): # print all the first cell
fn = namerow[0] #store firstname
ln = namerow[1] #store lastname
user = namerow[2]
self.destroy()
parent.destroy()
go_to_HomePage(user)
'''top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
# Frames to divide the window into three parts.. makes it easier to organize the widgets
topFrame = Frame(top)
topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=250)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
myProfileButton = Button(middleFrame, text="My Profile", fg="blue", font="Arial 14", command=self.myProfileScreen)
myProfileButton.pack()
quitButton = Button(top, text="Log Out", font="Arial 14", command=top.destroy).pack(side="bottom", padx=20)
#top.title(':D')
#top.geometry('250x200')
#get first name and last name of current player
cur.execute("SELECT firstname, lastname FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
for namerow in cur.fetchall(): # print all the first cell
fn = namerow[0] #store firstname
ln = namerow[1] #store lastname
rlb1 = Label(middleFrame, text='\nWelcome %s %s\n' % (fn, ln), font="Arial 14")
rlb1.pack()
rlb2 = Label(middleFrame, text='\nUserName: %s' % entry_user.get(), font="Arial 14")
rlb2.pack()
top.mainloop()
self.destroy()
parent.destroy()
go_to_HomePage()'''
else:
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Invalid Login')
rlbl.pack()
r.mainloop()
dbconn.close()
########################################## SIGN UP SCREEN - GUI ####################################################
def SignupScreen(self):
global entry_fname
global entry_lname
global entry_user
global entry_pass
global entry_repass
global entry_email
global entry_ACBL
global entry_disID
top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("550x450+%d+%d" % (w / 2 - 275, h / 2 - 140)) # 250
#top.configure(background='white')
quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
#topFrame = Frame(top)
#topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=50)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
# Widgets and which frame they are in
#label = Label(topFrame, text="LET'S PLAY BRIDGE")
fnameLabel = Label(middleFrame,text = 'First Name:',font="Arial 14")
lnameLabel = Label(middleFrame, text='Last Name:',font="Arial 14")
userLabel = Label(middleFrame, text='Username:',font="Arial 14")
passLabel = Label(middleFrame, text='Password:',font="Arial 14")
repassLabel = Label(middleFrame, text='Re-Enter Password:',font="Arial 14")
emailLabel = Label(middleFrame, text='Email(optional):',font="Arial 14")
ACBLnumLabel = Label(middleFrame, text='ACBLnum(optional):',font="Arial 14")
disIDLabel = Label(middleFrame, text='DistrictID(optional):',font="Arial 14")
entry_fname = Entry(middleFrame) #For DB
entry_lname = Entry(middleFrame) #For DB
entry_user = Entry(middleFrame)#For DB
entry_pass = Entry(middleFrame, show = '*')#For DB
entry_repass = Entry(middleFrame, show = '*')#For DB
entry_email = Entry(middleFrame)#For DB
entry_ACBL = Entry(middleFrame)#For DB
entry_disID = Entry(middleFrame)#For DB
b = Button(bottomFrame, text="Sign up", font="Arial 14", command=lambda : combined_Functions(self))
# Location of the Widgets in their frames
#label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
fnameLabel.grid(row=1, column=0, sticky=W)
entry_fname.grid(row=1, column=1)
lnameLabel.grid(row=2, column=0, sticky=W)
entry_lname.grid(row=2, column=1)
userLabel.grid(row=3, column=0, sticky=W)
entry_user.grid(row=3, column=1)
passLabel.grid(row=4, column=0, sticky=W)
entry_pass.grid(row=4, column=1)
repassLabel.grid(row=5, column=0, sticky=W)
entry_repass.grid(row=5, column=1)
emailLabel.grid(row=6, column=0, sticky=W)
entry_email.grid(row=6, column=1, padx=20, sticky= W)
ACBLnumLabel.grid(row=7, column=0, sticky=W)
entry_ACBL.grid(row=7, column=1, padx=20)
disIDLabel.grid(row=8, column=0, sticky=W)
entry_disID.grid(row=8, column=1)
b.grid(row=10, columnspan=2)
####################################DATABASE Check if Username is available, check if passwords Match -> if so SIGN UP!!!!!!!!!!!!!!!
def get_Signup_input():
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
cur.execute("SELECT username FROM playerinfo WHERE username = '%s'" % entry_user.get())
rows = cur.fetchall()
if not rows:
# print(userInput + " is available")
if (entry_pass.get() == entry_repass.get()) and (entry_pass.get()!= "") and (entry_repass.get()!= ""):
# print("passwords match, good job brotha")
# INSERT new player ... playerinfo check
todaysdate = datetime.datetime.today().strftime('%Y-%m-%d') # current date
cur.execute("INSERT INTO playerinfo(username, password, signUpDate, firstname, lastname, email, ACLnum, districtID) VALUES('%s','%s','%s','%s','%s','%s','%s','%s')" % (
entry_user.get(), entry_pass.get(), todaysdate, entry_fname.get(), entry_lname.get(), entry_email.get(),entry_ACBL.get(), entry_disID.get()))
#get new player's ID
cur.execute("SELECT ID FROM playerinfo WHERE username='%s'" % entry_user.get())
for namerow in cur.fetchall(): # print all the first cell
idNum = namerow[0] # store ID number
# new player's...playerstats inserted by ID
cur.execute("INSERT INTO playerstats(ID) VALUES('%s')" % idNum)
dbconn.commit() #database commit aka save
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[+] Signed Up!')
rlbl.pack()
r.mainloop()
else:
# print("passwords don't match bruh or are NULL")
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Retype your passwords')
rlbl.pack()
r.mainloop()
else:
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Username Not Available ')
rlbl.pack()
r.mainloop()
dbconn.close()
def go_to_Tutorial():
window = Toplevel()
window.geometry("600x500")
quitButton = Button(window, text="Cancel", font="Arial 14", command= window.destroy).pack(side="bottom", padx=20)
top_Frame = Frame(window)
top_Frame.pack()
tLabel = Label(top_Frame, text="TUTORIAL", font="Arial 36").pack(side="top", fill="both", expand=True, padx=20, pady=20)
def combined_Functions(self): # for the Sign Up button - store data, exits Sign Up screen, goes to Tutorial screen
get_Signup_input()
# top.destroy()
#go_to_Tutorial()
#####################################My Profile - GUI #########################################
def myProfileScreen(self):
top = Toplevel(self)
        w, h = top.winfo_screenwidth(), top.winfo_screenheight()
        top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
topFrame = Frame(top)
topFrame.pack()
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
rightFrame = Frame(top)
rightFrame.pack(side= RIGHT)
leftFrame = Frame(top)
leftFrame.pack(side=LEFT)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@DB stuff@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#entry_user.get() //username
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
global data
data=[]
# get all info from playerinfo and playerstats using current username
cur.execute(
"SELECT playerinfo.firstname, playerinfo.lastname, playerinfo.username, playerinfo.email, playerinfo.signUpDate, playerinfo.districtID, playerinfo.ACLnum, playerstats.dealsplayed, playerstats.level, playerstats.exp, playerstats.coins, playerstats.tournys FROM playerstats INNER JOIN playerinfo ON playerinfo.ID=playerstats.ID AND playerinfo.username='%s'" % entry_user.get())
for namerow in cur.fetchall(): # print all info
fn = namerow[0] # firstname
ln = namerow[1] # lastname
un = namerow[2] #username
em = namerow[3] # email
sData = namerow[4] # signUpDate
districtID = namerow[5] # District ID
acblNumba = namerow[6] # ACBL Number
dPlay = namerow[7] #deals played
lvl = namerow[8] # level
exp = namerow[9] # experience
coins = namerow[10] # coins
tornys = namerow[11] # tournaments
dbconn.close() #close db connection
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
label = Label(topFrame, text="LET'S PLAY BRIDGE",font =('Coralva', 42)).pack(side="top", fill="both", expand=True)
mpLabel = Label(rightFrame, text='My Profile: ', font = ('Comic Sans MS',24)).grid(ipadx = 200, columnspan = 2)
nameLabel = Label(rightFrame, text="Name: %s %s" % (fn, ln), font = ('Comic Sans MS',14)).grid(row=1, column=0, sticky = W)
userLabel = Label(rightFrame, text='Username: %s' % un, font = ('Comic Sans MS',14)).grid(row=2, column=0, sticky = W)
emailLabel = Label (rightFrame, text='Email: %s' % em, font = ('Comic Sans MS',14)).grid(row=3, column=0, sticky = W)
sLabel = Label(rightFrame, text='Signup Date: %s' %sData, font = ('Comic Sans MS',14)).grid(row=4, column=0, sticky = W)
disIDLabel = Label(rightFrame, text='DistrictID: %s' % districtID , font = ('Comic Sans MS',14)).grid(row=5, column=0, sticky = W)
ACBLnumLabel = Label(rightFrame, text='ACBL #: %s' % acblNumba, font = ('Comic Sans MS',14)).grid(row=6, column=0, sticky = W)
nothing = Label(rightFrame).grid(row=7, column=0)
msLabel= Label(rightFrame, text='My Stats', font = ('Comic Sans MS',14, 'bold')).grid(row=8, column=0, sticky = W)
dpLabel = Label(rightFrame, text='Deals Played: %s' %dPlay, font = ('Comic Sans MS',14)).grid(row=9, column=0, sticky = W)
levelLabel = Label(rightFrame, text='Level: %s' % lvl, font = ('Comic Sans MS',14)).grid(row=10, column=0, sticky = W)
expLabel = Label(rightFrame, text='Experience: %s' % exp, font = ('Comic Sans MS',14)).grid(row=11, column=0, sticky = W)
coinsLabel = Label(rightFrame, text='Coins: %s' % coins, font = ('Comic Sans MS',14)).grid(row=12, column=0, sticky = W)
tourLabel = Label(rightFrame, text='Tournaments: %s' % tornys, font = ('Comic Sans MS',14)).grid(row=13, column=0, sticky = W)
#b = Button(bottomFrame, text="HOME",font = 'Arial 12').pack(side=LEFT) #FIND A IMAGE OF A HOUSE
quitButton = Button(bottomFrame, text="Go Back", command=top.destroy, font = 'Arial 12').pack(side = RIGHT)
root = Tk()
MainMenu(root).pack(fill="both", expand=True)
root.mainloop()
|
ACBL-Bridge/Bridge-Application
|
Home Files/LoginandSignupV10.py
|
Python
|
mit
| 17,362 | 0.013708 |
"""
Copy RWIS data from iem database to its final resting home in 'rwis'
The RWIS data is partitioned by UTC timestamp
Run at 0Z and 12Z, provided with a timestamp to process
"""
import datetime
import sys
import psycopg2.extras
from pyiem.util import get_dbconn, utc
def main(argv):
"""Go main"""
iemdb = get_dbconn("iem")
rwisdb = get_dbconn("rwis")
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]))
ts2 = ts + datetime.timedelta(hours=24)
rcursor = rwisdb.cursor()
# Remove previous entries for this UTC date
for suffix in ["", "_soil", "_traffic"]:
rcursor.execute(
f"DELETE from t{ts.year}{suffix} WHERE valid >= %s and valid < %s",
(ts, ts2),
)
rcursor.close()
# Always delete stuff 3 or more days old from iemaccess
icursor = iemdb.cursor()
icursor.execute(
"DELETE from rwis_traffic_data_log WHERE "
"valid < ('TODAY'::date - '3 days'::interval)"
)
icursor.execute(
"DELETE from rwis_soil_data_log WHERE "
"valid < ('TODAY'::date - '3 days'::interval)"
)
icursor.close()
# Get traffic obs from access
icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
icursor.execute(
"""SELECT l.nwsli as station, s.lane_id, d.* from
rwis_traffic_data_log d, rwis_locations l, rwis_traffic_sensors s
WHERE s.id = d.sensor_id and valid >= '%s' and valid < '%s'
and s.location_id = l.id"""
% (ts, ts2)
)
rows = icursor.fetchall()
if not rows:
print("No RWIS traffic found between %s and %s" % (ts, ts2))
icursor.close()
# Write to archive
rcursor = rwisdb.cursor()
rcursor.executemany(
f"""INSERT into t{ts.year}_traffic
(station, valid,
lane_id, avg_speed, avg_headway, normal_vol, long_vol, occupancy)
VALUES (%(station)s,%(valid)s,
%(lane_id)s, %(avg_speed)s, %(avg_headway)s, %(normal_vol)s,
%(long_vol)s, %(occupancy)s)
""",
rows,
)
rcursor.close()
# Get soil obs from access
icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
sql = """SELECT l.nwsli as station, d.valid,
max(case when sensor_id = 1 then temp else null end) as tmpf_1in,
max(case when sensor_id = 3 then temp else null end) as tmpf_3in,
max(case when sensor_id = 6 then temp else null end) as tmpf_6in,
max(case when sensor_id = 9 then temp else null end) as tmpf_9in,
max(case when sensor_id = 12 then temp else null end) as tmpf_12in,
max(case when sensor_id = 18 then temp else null end) as tmpf_18in,
max(case when sensor_id = 24 then temp else null end) as tmpf_24in,
max(case when sensor_id = 30 then temp else null end) as tmpf_30in,
max(case when sensor_id = 36 then temp else null end) as tmpf_36in,
max(case when sensor_id = 42 then temp else null end) as tmpf_42in,
max(case when sensor_id = 48 then temp else null end) as tmpf_48in,
max(case when sensor_id = 54 then temp else null end) as tmpf_54in,
max(case when sensor_id = 60 then temp else null end) as tmpf_60in,
max(case when sensor_id = 66 then temp else null end) as tmpf_66in,
max(case when sensor_id = 72 then temp else null end) as tmpf_72in
from rwis_soil_data_log d, rwis_locations l
WHERE valid >= '%s' and valid < '%s' and d.location_id = l.id
GROUP by station, valid""" % (
ts,
ts2,
)
icursor.execute(sql)
rows = icursor.fetchall()
if not rows:
print("No RWIS soil obs found between %s and %s" % (ts, ts2))
icursor.close()
# Write to RWIS Archive
rcursor = rwisdb.cursor()
rcursor.executemany(
f"""INSERT into t{ts.year}_soil
(station, valid,
tmpf_1in, tmpf_3in, tmpf_6in, tmpf_9in, tmpf_12in, tmpf_18in,
tmpf_24in, tmpf_30in, tmpf_36in, tmpf_42in, tmpf_48in, tmpf_54in,
tmpf_60in, tmpf_66in, tmpf_72in) VALUES (
%(station)s,%(valid)s,
%(tmpf_1in)s, %(tmpf_3in)s, %(tmpf_6in)s, %(tmpf_9in)s, %(tmpf_12in)s,
%(tmpf_18in)s, %(tmpf_24in)s, %(tmpf_30in)s, %(tmpf_36in)s,
%(tmpf_42in)s, %(tmpf_48in)s, %(tmpf_54in)s, %(tmpf_60in)s,
%(tmpf_66in)s, %(tmpf_72in)s)
""",
rows,
)
rcursor.close()
# Get regular obs from Access
icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Since we store drct in the RWIS archive as NaN, we better make sure
# we don't attempt to use these values as it will error out
icursor.execute("update current_log set drct = null where drct = 'NaN'")
sql = """SELECT c.*, t.id as station from current_log c, stations t
WHERE valid >= '%s' and valid < '%s'
and t.network ~* 'RWIS' and t.iemid = c.iemid""" % (
ts,
ts2,
)
icursor.execute(sql)
rows = icursor.fetchall()
if not rows:
print("No RWIS obs found between %s and %s" % (ts, ts2))
icursor.close()
# Write to RWIS Archive
rcursor = rwisdb.cursor()
rcursor.executemany(
f"""INSERT into t{ts.year} (station, valid, tmpf,
dwpf, drct, sknt, tfs0, tfs1, tfs2, tfs3, subf, gust, tfs0_text,
tfs1_text, tfs2_text, tfs3_text, pcpn, vsby) VALUES (%(station)s,
%(valid)s,%(tmpf)s,%(dwpf)s,%(drct)s,%(sknt)s,%(tsf0)s,
%(tsf1)s,%(tsf2)s,%(tsf3)s,%(rwis_subf)s,%(gust)s,%(scond0)s,
%(scond1)s,%(scond2)s,%(scond3)s,%(pday)s,%(vsby)s)""",
rows,
)
rcursor.close()
rwisdb.commit()
iemdb.commit()
rwisdb.close()
iemdb.close()
if __name__ == "__main__":
main(sys.argv)
|
akrherz/iem
|
scripts/dbutil/rwis2archive.py
|
Python
|
mit
| 5,737 | 0 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"Module called for configuring, compiling and installing targets"
import os, sys, shutil, traceback, datetime, inspect, errno
import Utils, Configure, Build, Logs, Options, Environment, Task
from Logs import error, warn, info
from Constants import *
g_gz = 'bz2'
commands = []
def prepare_impl(t, cwd, ver, wafdir):
Options.tooldir = [t]
Options.launch_dir = cwd
# some command-line options can be processed immediately
if '--version' in sys.argv:
opt_obj = Options.Handler()
opt_obj.curdir = cwd
opt_obj.parse_args()
sys.exit(0)
# now find the wscript file
msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE
# in theory projects can be configured in an autotool-like manner:
# mkdir build && cd build && ../waf configure && ../waf
build_dir_override = None
candidate = None
lst = os.listdir(cwd)
search_for_candidate = True
if WSCRIPT_FILE in lst:
candidate = cwd
elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst:
# autotool-like configuration
calldir = os.path.abspath(os.path.dirname(sys.argv[0]))
if WSCRIPT_FILE in os.listdir(calldir):
candidate = calldir
search_for_candidate = False
else:
error('arg[0] directory does not contain a wscript file')
sys.exit(1)
build_dir_override = cwd
# climb up to find a script if it is not found
while search_for_candidate:
if len(cwd) <= 3:
break # stop at / or c:
dirlst = os.listdir(cwd)
if WSCRIPT_FILE in dirlst:
candidate = cwd
if 'configure' in sys.argv and candidate:
break
if Options.lockfile in dirlst:
env = Environment.Environment()
try:
env.load(os.path.join(cwd, Options.lockfile))
except:
error('could not load %r' % Options.lockfile)
try:
os.stat(env['cwd'])
except:
candidate = cwd
else:
candidate = env['cwd']
break
cwd = os.path.dirname(cwd) # climb up
if not candidate:
# check if the user only wanted to display the help
if '-h' in sys.argv or '--help' in sys.argv:
warn('No wscript file found: the help message may be incomplete')
opt_obj = Options.Handler()
opt_obj.curdir = cwd
opt_obj.parse_args()
else:
error(msg1)
sys.exit(0)
# We have found wscript, but there is no guarantee that it is valid
try:
os.chdir(candidate)
except OSError:
raise Utils.WafError("the folder %r is unreadable" % candidate)
# define the main module containing the functions init, shutdown, ..
Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE))
if build_dir_override:
d = getattr(Utils.g_module, BLDDIR, None)
if d:
# test if user has set the blddir in wscript.
msg = ' Overriding build directory %s with %s' % (d, build_dir_override)
warn(msg)
Utils.g_module.blddir = build_dir_override
# bind a few methods and classes by default
def set_def(obj, name=''):
n = name or obj.__name__
if not n in Utils.g_module.__dict__:
setattr(Utils.g_module, n, obj)
for k in [dist, distclean, distcheck, clean, install, uninstall]:
set_def(k)
set_def(Configure.ConfigurationContext, 'configure_context')
for k in ['build', 'clean', 'install', 'uninstall']:
set_def(Build.BuildContext, k + '_context')
# now parse the options from the user wscript file
opt_obj = Options.Handler(Utils.g_module)
opt_obj.curdir = candidate
try:
f = Utils.g_module.set_options
except AttributeError:
pass
else:
opt_obj.sub_options([''])
opt_obj.parse_args()
if not 'init' in Utils.g_module.__dict__:
Utils.g_module.init = Utils.nada
if not 'shutdown' in Utils.g_module.__dict__:
Utils.g_module.shutdown = Utils.nada
main()
def prepare(t, cwd, ver, wafdir):
if WAFVERSION != ver:
msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir)
print('\033[91mError: %s\033[0m' % msg)
sys.exit(1)
#"""
try:
prepare_impl(t, cwd, ver, wafdir)
except Utils.WafError, e:
error(str(e))
sys.exit(1)
except KeyboardInterrupt:
Utils.pprint('RED', 'Interrupted')
sys.exit(68)
"""
import cProfile, pstats
cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {},
{'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir},
'profi.txt')
p = pstats.Stats('profi.txt')
p.sort_stats('time').print_stats(45)
#"""
def main():
global commands
commands = Options.arg_line[:]
while commands:
x = commands.pop(0)
ini = datetime.datetime.now()
if x == 'configure':
fun = configure
elif x == 'build':
fun = build
else:
fun = getattr(Utils.g_module, x, None)
if not fun:
raise Utils.WscriptError('No such command %r' % x)
ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
# compatibility TODO remove in waf 1.6
try:
fun(ctx)
except TypeError:
fun()
else:
fun(ctx)
ela = ''
if not Options.options.progress_bar:
ela = ' (%s)' % Utils.get_elapsed_time(ini)
if x != 'init' and x != 'shutdown':
info('%r finished successfully%s' % (x, ela))
if not commands and x != 'shutdown':
commands.append('shutdown')
def configure(conf):
src = getattr(Options.options, SRCDIR, None)
if not src: src = getattr(Utils.g_module, SRCDIR, None)
if not src: src = getattr(Utils.g_module, 'top', None)
if not src:
src = '.'
incomplete_src = 1
src = os.path.abspath(src)
bld = getattr(Options.options, BLDDIR, None)
if not bld: bld = getattr(Utils.g_module, BLDDIR, None)
if not bld: bld = getattr(Utils.g_module, 'out', None)
if not bld:
bld = 'build'
incomplete_bld = 1
if bld == '.':
raise Utils.WafError('Setting blddir="." may cause distclean problems')
bld = os.path.abspath(bld)
try: os.makedirs(bld)
except OSError: pass
# It is not possible to compile specific targets in the configuration
# this may cause configuration errors if autoconfig is set
targets = Options.options.compile_targets
Options.options.compile_targets = None
Options.is_install = False
conf.srcdir = src
conf.blddir = bld
conf.post_init()
if 'incomplete_src' in vars():
conf.check_message_1('Setting srcdir to')
conf.check_message_2(src)
if 'incomplete_bld' in vars():
conf.check_message_1('Setting blddir to')
conf.check_message_2(bld)
# calling to main wscript's configure()
conf.sub_config([''])
conf.store()
# this will write a configure lock so that subsequent builds will
# consider the current path as the root directory (see prepare_impl).
# to remove: use 'waf distclean'
env = Environment.Environment()
env[BLDDIR] = bld
env[SRCDIR] = src
env['argv'] = sys.argv
env['commands'] = Options.commands
env['options'] = Options.options.__dict__
# conf.hash & conf.files hold wscript files paths and hash
# (used only by Configure.autoconfig)
env['hash'] = conf.hash
env['files'] = conf.files
env['environ'] = dict(conf.environ)
env['cwd'] = os.path.split(Utils.g_module.root_path)[0]
if Utils.g_module.root_path != src:
# in case the source dir is somewhere else
env.store(os.path.join(src, Options.lockfile))
env.store(Options.lockfile)
Options.options.compile_targets = targets
def clean(bld):
'''removes the build files'''
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError('Nothing to clean (project not configured)')
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
bld.is_install = 0 # False
# read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar')
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
try:
bld.clean()
finally:
bld.save()
def check_configured(bld):
if not Configure.autoconfig:
return bld
conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context)
bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context)
def reconf(proj):
back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose)
Options.commands = proj['commands']
Options.options.__dict__ = proj['options']
conf = conf_cls()
conf.environ = proj['environ']
configure(conf)
(Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
conf = conf_cls()
configure(conf)
else:
try:
bld = bld_cls()
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
except Utils.WafError:
reconf(proj)
return bld_cls()
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError('Auto-config: project does not configure (bug)')
h = 0
try:
for file in proj['files']:
if file.endswith('configure'):
h = hash((h, Utils.readf(file)))
else:
mod = Utils.load_module(file)
h = hash((h, mod.waf_hash_val))
except (OSError, IOError):
warn('Reconfiguring the project: a file is unavailable')
reconf(proj)
else:
if (h != proj['hash']):
warn('Reconfiguring the project: the configuration has changed')
reconf(proj)
return bld_cls()
def install(bld):
'''installs the build files'''
bld = check_configured(bld)
Options.commands['install'] = True
Options.commands['uninstall'] = False
Options.is_install = True
bld.is_install = INSTALL
build_impl(bld)
bld.install()
def uninstall(bld):
'''removes the installed files'''
Options.commands['install'] = False
Options.commands['uninstall'] = True
Options.is_install = True
bld.is_install = UNINSTALL
try:
def runnable_status(self):
return SKIP_ME
setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
setattr(Task.Task, 'runnable_status', runnable_status)
build_impl(bld)
bld.install()
finally:
setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
def build(bld):
bld = check_configured(bld)
Options.commands['install'] = False
Options.commands['uninstall'] = False
Options.is_install = False
bld.is_install = 0 # False
return build_impl(bld)
def build_impl(bld):
# compile the project and/or install the files
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError("Project not configured (run 'waf configure' first)")
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
# execute something immediately before the build starts
bld.pre_build()
try:
bld.compile()
finally:
if Options.options.progress_bar: print('')
info("Waf: Leaving directory `%s'" % bld.bldnode.abspath())
# execute something immediately after a successful build
bld.post_build()
bld.install()
excludes = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split()
dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split()
def dont_dist(name, src, build_dir):
global excludes, dist_exts
if (name.startswith(',,')
or name.startswith('++')
or name.startswith('.waf')
or (src == '.' and name == Options.lockfile)
or name in excludes
or name == build_dir
):
return True
for ext in dist_exts:
if name.endswith(ext):
return True
return False
# like shutil.copytree
# excludes files and raises exceptions immediately
def copytree(src, dst, build_dir):
names = os.listdir(src)
os.makedirs(dst)
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if dont_dist(name, src, build_dir):
continue
if os.path.isdir(srcname):
copytree(srcname, dstname, build_dir)
else:
shutil.copy2(srcname, dstname)
# TODO in waf 1.6, change this method if "srcdir == blddir" is allowed
def distclean(ctx=None):
'''removes the build directory'''
global commands
lst = os.listdir('.')
for f in lst:
if f == Options.lockfile:
try:
proj = Environment.Environment(f)
except:
Logs.warn('could not read %r' % f)
continue
try:
shutil.rmtree(proj[BLDDIR])
except IOError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
Logs.warn('project %r cannot be removed' % proj[BLDDIR])
try:
os.remove(f)
except OSError, e:
if e.errno != errno.ENOENT:
Logs.warn('file %r cannot be removed' % f)
# remove the local waf cache
if not commands and f.startswith('.waf'):
shutil.rmtree(f, ignore_errors=True)
# FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version
def dist(appname='', version=''):
'''makes a tarball for redistributing the sources'''
    # returns (distdirname, tarballname)
import tarfile
if not appname: appname = Utils.g_module.APPNAME
if not version: version = Utils.g_module.VERSION
tmp_folder = appname + '-' + version
if g_gz in ['gz', 'bz2']:
arch_name = tmp_folder + '.tar.' + g_gz
else:
arch_name = tmp_folder + '.' + 'zip'
# remove the previous dir
try:
shutil.rmtree(tmp_folder)
except (OSError, IOError):
pass
# remove the previous archive
try:
os.remove(arch_name)
except (OSError, IOError):
pass
# copy the files into the temporary folder
blddir = getattr(Utils.g_module, BLDDIR, None)
if not blddir:
blddir = getattr(Utils.g_module, 'out', None)
copytree('.', tmp_folder, blddir)
# undocumented hook for additional cleanup
dist_hook = getattr(Utils.g_module, 'dist_hook', None)
if dist_hook:
back = os.getcwd()
os.chdir(tmp_folder)
try:
dist_hook()
finally:
# go back to the root directory
os.chdir(back)
if g_gz in ['gz', 'bz2']:
tar = tarfile.open(arch_name, 'w:' + g_gz)
tar.add(tmp_folder)
tar.close()
else:
Utils.zip_folder(tmp_folder, arch_name, tmp_folder)
try: from hashlib import sha1 as sha
except ImportError: from sha import sha
try:
digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest()
except:
digest = ''
info('New archive created: %s%s' % (arch_name, digest))
if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder)
return arch_name
# FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version
def distcheck(appname='', version='', subdir=''):
'''checks if the sources compile (tarball from 'dist')'''
import tempfile, tarfile
if not appname: appname = Utils.g_module.APPNAME
if not version: version = Utils.g_module.VERSION
waf = os.path.abspath(sys.argv[0])
tarball = dist(appname, version)
path = appname + '-' + version
# remove any previous instance
if os.path.exists(path):
shutil.rmtree(path)
t = tarfile.open(tarball)
for x in t: t.extract(x)
t.close()
# build_path is the directory for the waf invocation
if subdir:
build_path = os.path.join(path, subdir)
else:
build_path = path
instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version))
ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait()
if ret:
raise Utils.WafError('distcheck failed with code %i' % ret)
if os.path.exists(instdir):
raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir)
shutil.rmtree(path)
# FIXME remove in Waf 1.6 (kept for compatibility)
def add_subdir(dir, bld):
bld.recurse(dir, 'build')
|
urisimchoni/samba
|
third_party/waf/wafadmin/Scripting.py
|
Python
|
gpl-3.0
| 15,298 | 0.032684 |
import time
from pymongo import MongoClient
from datetime import datetime, timedelta
import json
from bson import Binary, Code
from bson.json_util import dumps
client = MongoClient('localhost', 27017)
db = client['election-2016']
def dumpData(yesterdayStr):
collectionName = 't' + yesterdayStr
cursor = db[collectionName].find()
count = cursor.count()
print(collectionName + ' found ' + str(count) + ' tweets')
# dump only if data count is greater than 0
if count > 0:
file = open('out/' + yesterdayStr + '.json', 'w')
file.write('[')
i = 0
for document in cursor:
doc = dumps(document)
file.write(doc)
if (i != count - 1):
file.write(',\n')
else:
file.write('\n]')
i = i + 1
        file.close()
        print('data for ' + yesterdayStr + ' successfully dumped at ' + str(datetime.now()))
# Run following code when the program starts
if __name__ == '__main__':
currentDate = str(datetime.now().month) + '_' + str(datetime.now().day)
#get now and yesterday strings
now = datetime.now()
yesterday = now - timedelta(days=1)
yesterdayStr = str(yesterday.month) + '_' + str(yesterday.day)
    #dump yesterday's collection
dumpData(yesterdayStr)
|
seungkim11/election-2016
|
python_streaming/yesterday_dump.py
|
Python
|
apache-2.0
| 1,289 | 0.006206 |
"""
Settings for testing the application.
"""
import os
DEBUG = True
DJANGO_RDFLIB_DEVELOP = True
DB_PATH = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'rdflib_django.db'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB_PATH,
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SITE_ID = 1
STATIC_URL = '/static/'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'rdflib_django',
)
ROOT_URLCONF = 'rdflib_django.urls'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
}
}
|
publysher/rdflib-django
|
src/rdflib_django/testsettings.py
|
Python
|
mit
| 1,265 | 0.000791 |
from vint.ast.traversing import traverse, register_traverser_extension
from vint.ast.parsing import Parser
from vint.ast.node_type import NodeType
REDIR_CONTENT = 'VINT:redir_content'
class RedirAssignmentParser(object):
""" A class to make redir assignment parseable. """
def process(self, ast):
def enter_handler(node):
node_type = NodeType(node['type'])
if node_type is not NodeType.EXCMD:
return
is_redir_command = node['ea']['cmd'].get('name') == 'redir'
if not is_redir_command:
return
redir_cmd_str = node['str']
is_redir_assignment = '=>' in redir_cmd_str
if not is_redir_assignment:
return
parser = Parser()
redir_content_node = parser.parse_redir(node)
node[REDIR_CONTENT] = redir_content_node
traverse(ast, on_enter=enter_handler)
return ast
def get_redir_content(node):
return node.get(REDIR_CONTENT)
@register_traverser_extension
def traverse_redir_content(node, on_enter=None, on_leave=None):
if REDIR_CONTENT not in node:
return
traverse(node[REDIR_CONTENT], on_enter=on_enter, on_leave=on_leave)
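# Sketch of intended use (the AST and the excmd node come from vint's parser and
# are assumptions here):
#   ast = RedirAssignmentParser().process(ast)
#   content = get_redir_content(excmd_node)  # None unless the node held a `redir => var`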
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/redir_assignment_parser.py
|
Python
|
mit
| 1,250 | 0.0024 |
# Used when precision or recall == 0 to suppress warnings
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import numpy as np
import sklearn_crfsuite
from sklearn.metrics import make_scorer, confusion_matrix
from sklearn_crfsuite import metrics
from sklearn_crfsuite.utils import flatten
from sklearn.model_selection import cross_validate, cross_val_predict, StratifiedKFold
from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from chemdataextractor.doc import Paragraph
from Tools import TextTools
stop_word_list = set(stopwords.words('english'))
wordnet_lemmatizer = WordNetLemmatizer()
chem_ents = []
def main():
train_docs = TextTools.loadNER("ASC")
train_sents = []
for index, row in train_docs.iterrows():
for word in row['text']:
train_sents.append(word)
    global chem_ents
    chem_ents = extract_chem_entities(train_sents)
X = [sent2features(s,chem_ents) for s in train_sents]
y = [sent2labels(s) for s in train_sents]
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
all_possible_transitions=True)
crf.fit(X, y)
# List of labels removing the non-entity classes
labels = list(crf.classes_)
labels.remove('O')
NUMBER_OF_FOLDS = 5
scoreers = {
"f1_scores": make_scorer(metrics.flat_f1_score, average='weighted', labels=labels),
"precision_scores": make_scorer(metrics.flat_precision_score, average='weighted', labels=labels),
"recall_scores": make_scorer(metrics.flat_recall_score, average='weighted', labels=labels),
}
scores = cross_validate(crf, X, y, cv=NUMBER_OF_FOLDS, scoring=scoreers, return_train_score=False, n_jobs=-1)
f1_scores = scores['test_f1_scores']
precision_scores = scores['test_precision_scores']
recall_scores = scores['test_recall_scores']
for x in range(NUMBER_OF_FOLDS):
print("Fold number: ", x)
print("Precision: ", precision_scores[x])
print("Recall: ", recall_scores[x])
print("F1 Score: ", f1_scores[x])
print("\n")
print("Averages Across Folds")
print("Precision: ", np.average(np.array(precision_scores)))
print("Recall: ", np.average(np.array(recall_scores)))
print("F1 Score: ", np.average(np.array(f1_scores)))
y_pred = cross_val_predict(crf, X, y, cv=NUMBER_OF_FOLDS)
conf_mat = confusion_matrix(flatten(y), flatten(y_pred))
print("\nConfusion Matrix\n")
print(" ".join(["NonEntity", "CoreComposition", "Precursor", "ReducingAgent", "Solvent", "Stabilizer"]))
print(conf_mat)
print("Top positive:")
print_state_features(Counter(crf.state_features_).most_common(30))
print("\nTop negative:")
print_state_features(Counter(crf.state_features_).most_common()[-30:])
def extract_chem_entities(sents):
document_text = [[str(w[0]) for w in s] for s in sents]
document_text = [" ".join(s) for s in document_text]
document_text = " ".join(document_text)
paragraph = Paragraph(document_text)
chem_entities = paragraph.cems
chem_entities = [c.text for c in chem_entities]
return chem_entities
def print_state_features(state_features):
for (attr, label), weight in state_features:
print("%0.6f %-8s %s" % (weight, label, attr))
def word2features(sent, word_position):
SENTENCE_BEGGINING = 0
SENTENCE_END = len(sent) - 1
word = sent[word_position][0]
pos = sent[word_position][1]
features = featureize(word, pos)
if word_position == SENTENCE_BEGGINING:
features.append('BOS')
if word_position > SENTENCE_BEGGINING:
previous_word = sent[word_position-1][0]
previous_pos = sent[word_position-1][1]
features.extend(featureize(previous_word, previous_pos, relation="-1"))
if word_position < SENTENCE_END:
next_word = sent[word_position+1][0]
next_pos = sent[word_position+1][1]
features.extend(featureize(next_word, next_pos, relation="+1"))
if word_position == SENTENCE_END:
features.append('EOS')
return features
def featureize(word, postag, relation=""):
suffix = word[-3:]
prefix = word[:3]
return [
relation + 'word.lower=' + word.lower(),
relation + 'word.isupper=%s' % word.isupper(),
relation + 'word.istitle=%s' % word.istitle(),
relation + 'word.isdigit=%s' % word.isdigit(),
relation + 'word.postag=%s' % postag,
relation + 'word.prefix=%s' % prefix,
relation + 'word.suffix=%s' % suffix,
relation + 'word.lemma=%s' % wordnet_lemmatizer.lemmatize(word),
relation + 'word.ischem=%s' % (word in chem_ents),
relation + 'word.containsdigit=%s' % contains_digit(word),
]
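# For example, featureize("Gold", "NNP") would (assuming "Gold" is not in
# chem_ents) produce features such as 'word.lower=gold', 'word.istitle=True',
# 'word.postag=NNP', 'word.prefix=Gol' and 'word.suffix=old'.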
def sent2features(sent, chem_ents):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, postag, label in sent]
def contains_digit(s):
return any(i.isdigit() for i in s)
if __name__ == "__main__":
main()
|
bmcinnes/VCU-VIP-Nanoinformatics
|
NERD/CRF/CRF.py
|
Python
|
gpl-3.0
| 5,075 | 0.00532 |
'''Library containing the conversion routines for the different
machine types.
Author: Lucas Possatti
'''
import re
import collections
def mealy_to_moore(me):
    '''Converts the parameter 'me' (which must be a Mealy machine) into
    a Moore machine, which is returned.
    '''
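    # Expected input layout, inferred from the accesses below (the states,
    # symbols and transitions shown are illustrative only):
    #   me = ['mealy',
    #         ['symbols-in', 'a', 'b'],
    #         ['symbols-out', '0', '1'],
    #         ['states', 'q0', 'q1'],
    #         ['start', 'q0'],
    #         ['finals', 'q1'],
    #         ['trans', ['q0', 'q1', 'a', '1'], ...]]  # [src, dst, input, output]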
    # Check that the machine received really is a Mealy machine.
if me[0] != 'mealy':
        raise ValueError('The mealy_to_moore function expected a Mealy machine as input.')
    # Create the Moore machine.
moo = ['moore']
    #!# # Look up the transitions arriving at each state, to check whether
    #!# # more than one transition targets a single state.
#!# for state in me[3][1:]:
#!# state_trans_outputs = set()
#!# for trans in me[6][1:]:
#!# if state == trans[1]:
#!# pass
    # Initialize a dictionary with every state as a key and an empty
    # list as its value.
state_outputs = collections.OrderedDict()
for state in me[3][1:]:
state_outputs[state] = []
    # Collect the outputs produced by the transitions into each state.
for trans in me[6][1:]:
        # Check that the destination state is in the 'state_outputs' dictionary.
if trans[1] not in state_outputs:
raise "Some transition state destination is not declared in the machine definition (states section). Malformed machine definition."
        # Add the output to the state's list, but only if it has not been added yet.
if trans[3] not in state_outputs[trans[1]]:
state_outputs[trans[1]].append(trans[3])
    # Determine what the new states of the Moore machine will be.
moore_states = []
out_fn = []
for state in state_outputs:
        # If the state has more than one output
if len(state_outputs[state]) > 1:
            # Iterate over each of this state's outputs to generate the new
            # states that are needed, appending '*' for each new state
            # created.
i = 0
for output in state_outputs[state]:
                # Generate the name for the new state.
new_state = state + '*'*i
                # Add the state to the new machine's list of states
moore_states.append(new_state)
                # Build the pair for the output function (out-fn).
out_fn.append([new_state, output])
i += 1
        # If the state has exactly one output.
elif len(state_outputs[state]) == 1:
            # Add the state to the new machine's list of states
moore_states.append(state)
            # Take this state's single output.
output = state_outputs[state][0]
            # Build the pair for the output function (out-fn).
out_fn.append([state, output])
        # If the state has no output at all (for example, if there is no
        # transition arriving at it).
else:
            # Add the state to the new machine's list of states
moore_states.append(state)
            # Build the pair for the output function (out-fn); in this case
            # the state has no output.
out_fn.append([state, []])
    # Generate the transitions needed for the Moore machine.
moore_trans = []
for trans in me[6][1:]:
for new_state in moore_states:
for fn in out_fn:
#!#print(trans, ":", new_state, ":", fn, "=", re.match("^" + trans[1] + r"\**", new_state) and re.match("^" + trans[1] + r"\**", fn[0]) and trans[3] == fn[1])#!#
                # Use the data gathered so far to decide how the Moore
                # machine's transitions should be created and which of them
                # should be kept.
if re.match("^" + trans[1] + r"\**", new_state) and re.match("^" + trans[1] + r"\**", fn[0]) and trans[3] == fn[1]:
                    # Build the transition that will be added.
temp_trans = [trans[0], fn[0], trans[2]]
                    # Add the new transition, but only if it has not been
                    # added already.
if temp_trans not in moore_trans:
moore_trans.append(temp_trans)
    # Determine which states are final. In principle they are all the Mealy
    # machine's finals, except that we also have to check whether the newly
    # created states (with '*') are final as well.
moore_finals = []
for final in me[5][1:]:
for moo_state in moore_states:
if re.match("^" + final + r"\**", moo_state):
moore_finals.append(moo_state)
moo.append(["symbols-in"] + me[1][1:])
moo.append(["symbols-out"] + me[2][1:])
moo.append(["states"] + moore_states)
moo.append(["start"] + [me[4][1]])
moo.append(["finals"] + moore_finals)
moo.append(["trans"] + moore_trans)
moo.append(["out-fn"] + out_fn)
#!# print('\nDEBUG:')
#!# print('me[0]', me[0])
#!# print('me[1]', me[1])
#!# print('me[2]', me[2])
#!# print('me[3]', me[3])
#!# print('me[4]', me[4])
#!# print('me[5]', me[5])
#!# print('me[6]', me[6])
#!# print(':END DEBUG\n')
return moo
def moore_to_mealy(moo):
    '''Converts the parameter 'moo' (which must be a Moore machine) into
    a Mealy machine, which is returned.
    '''
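    # Expected input layout, inferred from the accesses below (values are
    # illustrative): moo = ['moore', ['symbols-in', ...], ['symbols-out', ...],
    # ['states', ...], ['start', 'q0'], ['finals', ...],
    # ['trans', [src, dst, input], ...], ['out-fn', [state, output], ...]]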
    # Check that the machine received really is a Moore machine.
if moo[0] != 'moore':
        raise ValueError('The moore_to_mealy function expected a Moore machine as input.')
    # Create the Mealy machine.
me = ['mealy']
    # Copy the input and output symbols.
me.append(['symbols-in'] + moo[1][1:])
me.append(moo[2])
    # Copy the states, adding the new state 'qe'.
estados = [moo[3][0]] + ['qe'] + moo[3][1: ]
me.append(estados)
    # The start state is 'qe'.
me.append(['start', 'qe'])
    # The final states are the same.
me.append(moo[5])
    # Translate the Moore machine's transitions and outputs into Mealy form.
mealy_trans = []
moore_trans = moo[6][1:]
moore_outfn = moo[7][1:]
for trans in moore_trans:
        # Look up the output for that state change.
mealy_trans_output = None
for out in moore_outfn:
if out[0] == trans[1]:
mealy_trans_output = out[1]
        # Build the transition in Mealy format.
mealy_trans_stage = [trans[0], trans[1], trans[2], mealy_trans_output]
        # If the transition leaves the original start state, we additionally
        # need to add it as a transition out of the state 'qe'.
if mealy_trans_stage[0] == moo[4][1]:
mealy_trans.append(['qe'] + mealy_trans_stage[1:])
        # And add it to the Mealy machine's set of transitions.
mealy_trans.append(mealy_trans_stage)
    # Put the Mealy transitions into the machine.
me.append(['trans'] + mealy_trans)
#!# print('DEBUG:')
#!# print('moo[0]', moo[0])
#!# print('moo[1]', moo[1])
#!# print('moo[2]', moo[2])
#!# print('moo[3]', moo[3])
#!# print('moo[4]', moo[4])
#!# print('moo[5]', moo[5])
#!# print('moo[6]', moo[6])
#!# print('moo[7]', moo[7][0:-1])
#!# print(':END DEBUG')
return me
|
possatti/memoo
|
converter.py
|
Python
|
mit
| 6,467 | 0.025472 |
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from enocean.protocol.eep import EEP
eep = EEP()
# profiles = eep.
def test_first_range():
offset = -40
values = range(0x01, 0x0C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 40
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
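# With the loop above, type 0x01 spans -40..0 degrees and type 0x0B spans
# 60..100 degrees (i = 0 and i = 10 respectively).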
def test_second_range():
offset = -60
values = range(0x10, 0x1C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 80
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
def test_rest():
profile = eep.find_profile([], 0xA5, 0x02, 0x20)
assert -10 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +41.2 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
profile = eep.find_profile([], 0xA5, 0x02, 0x30)
assert -40 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +62.3 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
|
kipe/enocean
|
enocean/protocol/tests/test_temperature_sensors.py
|
Python
|
mit
| 1,616 | 0.005569 |
#!/usr/bin/env python2.7
"""
Compare all sample graphs to baseline graphs (platvcf and g1kvcf).
depends on callVariants.py output directory structure. Can do:
1)kmer set (jaccard and recall)
2)corg overlap
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools, glob
import doctest, re, json, collections, time, timeit, string, math, copy
from collections import defaultdict
from Bio.Phylo.TreeConstruction import _DistanceMatrix, DistanceTreeConstructor
from Bio import Phylo
import matplotlib
matplotlib.use('Agg')
import pylab
import networkx as nx
from collections import defaultdict
from toil.job import Job
from toillib import RealTimeLogger, robust_makedirs
from callVariants import alignment_sample_tag, alignment_region_tag, alignment_graph_tag, run
from callVariants import graph_path, sample_vg_path, g1k_vg_path, graph_path, sample_txt_path
from evaluateVariantCalls import defaultdict_set
from vcfQualStats import vcf_qual_stats, balance_tables
def parse_args(args):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add the Toil options so the job store is the first argument
Job.Runner.addToilOptions(parser)
# General options
parser.add_argument("in_gams", nargs="+",
help="input alignment files")
parser.add_argument("var_dir", type=str,
help="output dir for callVariants.py")
parser.add_argument("graph_dir", type=str,
help="name of input graphs directory")
parser.add_argument("comp_type", type=str,
help="comparison type from {kmer,corg,vcf,sompy,happy,vcfeval}")
parser.add_argument("comp_dir", type=str,
help="directory to write comparison output")
parser.add_argument("--kmer", type=int, default=27,
help="kmer size for indexing")
parser.add_argument("--edge_max", type=int, default=5,
help="edge-max parameter for vg kmer index")
parser.add_argument("--overwrite", action="store_true", default=False,
help="overwrite existing files (indexes and comparison output)")
parser.add_argument("--g1kvcf_path", type=str, default="data/g1kvcf",
help="path to search for 1000 genomes vcf and sequences. expects "
"these to be in <g1kvcf_path>BRCA1.vcf. etc. ")
parser.add_argument("--platinum_path", type=str, default="data/platinum",
help="path to search for platinum genomes vcf. expects "
"these to be in <platinum_path>/<sample>/BRCA1.vcf. etc. ")
parser.add_argument("--chrom_fa_path", type=str, default="data/g1kvcf/chrom.fa",
help="fasta file with entire chromosome info for all regions")
parser.add_argument("--happy_fa_path", type=str, default="data/g1kvcf/chrom2.fa",
help="fasta file with chrXX names for chromosomes. todo- use for above")
parser.add_argument("--gatk3_path", type=str, default="data/gatk3",
help="path to search for gatk3 vcf. expects "
" these to bein <gatk3_path>/<sample>/BRCA1.vcf etc.")
parser.add_argument("--platypus_path", type=str, default="data/platypus",
help="path to search for platypus vcf. expects "
" these to bein <platypus_path>/<sample>/BRCA1.vcf etc.")
parser.add_argument("--freebayes_path", type=str, default="data/freebayes",
help="path to search for freebayes vcf. expects "
" these to bein <freebayes_path>/<sample>/BRCA1.vcf etc.")
parser.add_argument("--samtools_path", type=str, default="data/samtools",
help="path to search for samtools vcf. expects "
" these to bein <samtools_path>/<sample>/BRCA1.vcf etc.")
parser.add_argument("--vg_cores", type=int, default=1,
help="number of cores to give to vg commands (and hap.py)")
parser.add_argument("--timeout", type=int, default=sys.maxint,
help="timeout in seconds for long jobs (vg index and corg in this case)")
parser.add_argument("--orig", action="store_true",
help="do all vs all comparison of input graphs")
parser.add_argument("--sample", action="store_true",
help="do all vs all comparison of sample graphs")
parser.add_argument("--orig_and_sample", action="store_true",
help="do all vs all comparison of sample + input graphs")
parser.add_argument("--ignore", action="append", default=[],
help="keyword to ignore in vcf comparison")
parser.add_argument("--normalize", action="store_true", default =False,
help="run vt normalization on all input vcfs")
parser.add_argument("--clip", type=str, default=None,
help="clip vcf using specified bed file before call comparisons")
parser.add_argument("--clip_fp", type=str, default=None,
help="false positives outside region will be called unknown (hap.py or som.py)")
parser.add_argument("--roc", action="store_true", default=False,
help="generate happy rocs for gatk3 and platypus")
parser.add_argument("--qpct", type=float, default=None,
help="apply quality percentile filter for gatk and platypus and freebayes")
parser.add_argument("--qgraph", action="store_true", default=False,
help="apply quality percentile filter to graphs too")
parser.add_argument("--baseline", type=str, default="platvcf",
help="baseline to use (platvcf or g1kvcf) for vcf comparisons")
parser.add_argument("--gt", action="store_true",
help="take into account genotype information (sompy or vcfeval)")
parser.add_argument("--new", action="store_true",
help="use new caller (vg genotype)")
parser.add_argument("--min_ll", type=float, default=None,
help="apply likelihood filter to vg call vcfs")
parser.add_argument("--filter_type", type=str, default="xaad",
help="used for vcfFilter for curves (when not --new): {xaad, ad, ll, xl}")
parser.add_argument("--dedupe", action="store_true", default=False,
help="use --dedupe option in vcfFilterQuality.py")
parser.add_argument("--vroc", action="store_true", default=False,
help="use vcfevals roc logic (only gives total, not indel snp breakdown) and wont work with clipping")
parser.add_argument("--cwd", default=os.getcwd(),
help="set Toil job working directory")
parser.add_argument("--combine_samples", type=str, default=None,
help="comma-separated list of samples to combine into COMBINED sample")
parser.add_argument("--combined_name", type=str, default="COMBINED",
help="name of the combined sample to generate")
parser.add_argument("--tp_baseline", action="store_true", default=False,
help="use tp-baseline.vcf instead of tp.vcf from vcfeval output for precision and recall")
args = args[1:]
return parser.parse_args(args)
def index_path(graph, options):
""" get the path of the index given the graph
"""
return graph + ".index"
def compute_kmer_index(job, graph, options):
""" run vg index (if necessary) and vg compare on the input
vg indexes are just created in place, ie same dir as graph,
so need to have write permission there
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
out_index_path = index_path(graph, options)
do_index = options.overwrite or not os.path.exists(out_index_path)
index_opts = "-s -k {} -t {}".format(options.kmer, options.vg_cores)
if options.edge_max > 0:
index_opts += " -e {}".format(options.edge_max)
if do_index:
os.system("rm -rf {}".format(out_index_path))
run("vg index {} {}".format(index_opts, graph), timeout_sec=options.timeout,
timeout_dep=out_index_path)
def comp_path(graph1, graph2, options):
""" get the path for json output of vg compare
"""
# canonical path for pair
if graph1 > graph2:
graph1, graph2 = graph2, graph1
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "kmer_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".json")
def corg_path(graph1, graph2, options):
""" get the path for the distance computed via corg lengths
"""
    # canonical path for pair (todo: corg isn't symmetric!)
if graph1 > graph2:
graph1, graph2 = graph2, graph1
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "corg_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".txt")
def input_vcf_path(graph, options, region = None, sample = None, method = None):
""" translate a gam to a vcf, with hack ot get at the baseline graphs"""
if region is None or sample is None or method is None:
region, sample, method = options.tags[graph]
# path names are a bit hacky here, but work for platinum samples
if method == "g1kvcf":
return os.path.join(options.platinum_path, sample + ".g1k", region.upper() + ".vcf")
elif method == "platvcf":
return os.path.join(options.platinum_path, sample, region.upper() + ".vcf")
elif method == "gatk3":
return os.path.join(options.gatk3_path, sample, region.upper() + ".vcf")
elif method == "platypus":
return os.path.join(options.platypus_path, sample, region.upper() + ".vcf")
elif method == "freebayes":
return os.path.join(options.freebayes_path, sample, region.upper() + ".vcf")
elif method == "samtools":
return os.path.join(options.samtools_path, sample, region.upper() + ".vcf")
else:
return graph.replace(".vg", ".vcf")
def preprocessed_vcf_path(graph, options):
""" get the path of the sorted vcf (normalized and/or clipped)
"""
region, sample, method = options.tags[graph]
# little hack because preprocessing different depending if graph is
# baseline or not (note it should never be both!)
if method == options.baseline:
method = "{}_basline".format(method)
return os.path.join(options.comp_dir, "preprocessed_vcfs", region,
sample + "_" + method + ".vcf")
def clip_bed_path(graph, options):
""" get a bed to clip vcf by """
return options.clip
def comp_path_vcf(graph1, graph2, options):
""" get the path for json output of vcf compare
"""
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "vcf_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".json")
def comp_path_sompy(graph1, graph2, options):
""" get the path for json output of vcf compare
"""
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "sompy_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".stats.csv")
def comp_path_happy(graph1, graph2, options):
""" get the path for json output of vcf compare
"""
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "happy_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".summary.csv")
def comp_path_vcfeval(graph1, graph2, options, sample_override = None):
""" get the path for json output of vcf compare
"""
region1, sample1, method1 = options.tags[graph1]
region2, sample2, method2 = options.tags[graph2]
assert region1 == region2
if sample_override is not None:
sample1 = sample_override
sample2 = sample_override
if sample1 is not None and sample2 is not None:
assert sample1 == sample2
s1tag = "_" + sample1 if sample1 is not None else ""
s2tag = "_" + sample2 if sample2 is not None else ""
return os.path.join(options.comp_dir, "vcfeval_compare_data", region1,
method1 + s1tag + "_vs_" + method2 + s2tag + ".vcfeval")
def corg_graph_path(graph1, graph2, options):
""" get the path for vg output of corg
"""
b, e = os.path.splitext(corg_path(graph1, graph2, options))
return b + ".vg"
def out_tsv_path(options, region, category, distance, sample = None):
""" get the output tsv path
"""
rtag = region if sample is None else region + "-" + sample
return os.path.join(options.comp_dir, "comp_tables",
category + "-" + distance + "-" + rtag + ".tsv")
def vg_length(vg, options):
""" get sequence length out of vg stats
"""
if not os.path.exists(vg):
return -1
cmd = "vg stats -l {}".format(vg)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=sys.stderr, bufsize=-1)
output, _ = p.communicate()
assert p.wait() == 0
length = int(output.split()[1])
return length
def raw_tsv_path(options, region, category, distance, sample = None):
""" get the output tsv path for "raw" tables (ie with nones for missing data)
"""
rtag = region if sample is None else region + "-" + sample
return os.path.join(options.comp_dir, "comp_tables_raw",
category + "-" + distance + "-" + rtag + ".tsv")
def jaccard_dist_fn(graph1, graph2, options):
""" scrape jaccard dist from vg compare output
"""
jpath = comp_path(graph1, graph2, options)
with open(jpath) as f:
j = json.loads(f.read())
if float(j["union"]) == 0:
jaccard = 2.
else:
jaccard = float(j["intersection"]) / float(j["union"])
return [[1. - jaccard]]
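# Note: jaccard_dist_fn above returns 1 - |intersection| / |union| over the two kmer sets.
# When the union is empty, jaccard is set to 2 so the returned value is -1, which appears
# to act as a sentinel for an empty or failed comparison rather than a real distance.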
def recall_dist_fn(graph1, graph2, options):
""" assymmetric version of above to compute recall of graph1 on graph2
return recall to be consistent with other functions where similar is smaller.
"""
jpath = comp_path(graph1, graph2, options)
with open(jpath) as f:
j = json.loads(f.read())
if index_path(graph2, options) == j["db2_path"]:
denom = float(j["db2_total"])
else:
assert index_path(graph2, options) == j["db1_path"]
denom = float(j["db1_total"])
intersection = float(j["intersection"])
recall = intersection / denom
return [[recall]]
def precision_dist_fn(graph1, graph2, options):
""" get 1 - precision of graph1 on graph2
"""
return recall_dist_fn(graph2, graph1, options)
def corg_dist_fn(graph1, graph2, options):
""" scrape corg dist from corg output
"""
cpath = corg_path(min(graph1, graph2), max(graph1, graph2), options)
with open(cpath) as f:
c = f.readline().strip()
dist = float(c)
return [[dist]]
def vcf_dist_fn(graph1, graph2, options):
""" scrape vcfCompare data"""
jpath = comp_path_vcf(graph1, graph2, options)
with open(jpath) as f:
j = json.loads(f.read())
path1 = j["Path1"]
path2 = j["Path2"]
query_vcf_path = preprocessed_vcf_path(graph1, options)
# we expect graph2 to be a baseline graph
region2, sample2, method2 = options.tags[graph2]
assert method2 in ["g1kvcf", "platvcf"]
truth_vcf_path = preprocessed_vcf_path(graph2, options)
# do we need to flip ever?
assert path1 == query_vcf_path
assert path2 == truth_vcf_path
return [[j["Alts"]["SNP"]["Precision"],
j["Alts"]["SNP"]["Recall"],
j["Alts"]["MULTIBASE_SNP"]["Precision"],
j["Alts"]["MULTIBASE_SNP"]["Recall"],
j["Alts"]["INDEL"]["Precision"],
j["Alts"]["INDEL"]["Recall"],
j["Alts"]["TOTAL"]["Precision"],
j["Alts"]["TOTAL"]["Recall"],
0]]
def vcf_dist_header(options):
""" header"""
return ["SNP-Precision",
"SNP-Recall",
"MB-Precision",
"MB-Recall",
"INDEL-Precision",
"INDEL-Recall",
"TOT-Precision",
"TOT-Recall",
"QUAL"]
def sompy_dist_fn(graph1, graph2, options):
jpath = comp_path_sompy(graph1, graph2, options)
header = None
snps = None
indels = None
total = None
# read sompy csv output. be a little flexible in terms of row column order (but not names!)
RealTimeLogger.get().warning(jpath)
with open(jpath) as f:
for line in f:
toks = line.split(",")
if len(toks) < 2:
continue
if toks[1] == "type":
header = toks
rec_idx = toks.index("recall")
prec_idx = toks.index("precision")
elif toks[1] == "indels":
indels = toks
elif toks[1] == "SNVs":
snps = toks
elif toks[1] == "records":
total = toks
# indels optional
if indels is None:
indels = [0] * 100
    # shoehorn into vcfCompare style output (todo, revise this)
return [[snps[prec_idx],
snps[rec_idx],
0,
0,
indels[prec_idx],
indels[rec_idx],
total[prec_idx],
total[rec_idx],
0]]
def happy_dist_fn(graph1, graph2, options):
jpath = comp_path_happy(graph1, graph2, options)
header = None
snps = None
indels = None
total = None
if options.roc is True and options.tags[graph1][2] in ["gatk3", "platypus", "g1kvcf", "freebayes", "samtools"]:
# read happy roc output.
        # todo: indels and total: problem = rocs have different numbers of lines, which doesn't fit the interface as is
snp_roc_path = jpath.replace("summary.csv", "roc.snp.all.tsv")
rows = []
with open(snp_roc_path) as f:
for line in f:
toks = line.split()
if "precision" in toks:
prec_idx = toks.index("precision")
rec_idx = toks.index("recall")
else:
rows.append([toks[prec_idx], toks[rec_idx],
0, 0,
0, 0,
0, 0])
return rows
else:
# read happy csv output. be a little flexible in terms of row column order (but not names!)
with open(jpath) as f:
for line in f:
toks = line.strip().split(",")
if len(toks) < 2:
continue
if "METRIC.Recall" in toks:
header = toks
rec_idx = toks.index("METRIC.Recall")
prec_idx = toks.index("METRIC.Precision")
elif toks[0] == "INDEL":
indels = toks
elif toks[0] == "SNP":
snps = toks
elif toks[0] == "Locations":
total = toks
# indels optional
if indels is None:
indels = [0] * 100
        # total optional
if total is None:
total = [0] * 100
        # shoehorn into vcfCompare style output (todo, revise this)
return [[snps[prec_idx],
snps[rec_idx],
0,
0,
indels[prec_idx],
indels[rec_idx],
total[prec_idx],
total[rec_idx],
0]]
def save_vcfeval_stats(out_path, fn_table, fp_table, tp_table):
""" write some summary counts from the vceval vcf output """
def write_table(t, name):
with open(os.path.join(out_path, "comp_counts_{}.tsv".format(name)), "w") as f:
for line in t:
f.write("{}\t{}\t{}\t{}\n".format(line[0], line[1], line[2], line[3]))
write_table(fn_table, "fn")
write_table(fp_table, "fp")
write_table(tp_table, "tp")
def load_vcfeval_stats(out_path):
""" read the counts back from file as list """
def read_table(name):
t = []
with open(os.path.join(out_path, "comp_counts_{}.tsv".format(name))) as f:
for line in f:
if len(line) > 0:
row = line.split("\t")
t += [[float(row[0]), int(row[1]), int(row[2]), int(row[3])]]
return t
fn_table = read_table("fn")
fp_table = read_table("fp")
tp_table = read_table("tp")
balance_tables(fn_table, fp_table, tp_table)
return fn_table, fp_table, tp_table
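# Note: the comp_counts_fn.tsv / comp_counts_fp.tsv / comp_counts_tp.tsv files written and
# read above are plain 4-column TSVs with one row per quality threshold:
#   <qual>\t<snp count>\t<indel count>\t<other count>
# (for example, a hypothetical row "10.0\t1523\t210\t4").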
def vcf_num_records(vcf_path, bed_path = None):
""" use bcftools stats to get the number of snps indels other in vcf """
if not os.path.exists(vcf_path):
return -1
cmd = "bcftools stats {}".format(vcf_path)
if bed_path is not None:
cmd += " -R {}".format(bed_path)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=sys.stderr, bufsize=-1)
output, _ = p.communicate()
assert p.wait() == 0
hits = 0
for line in output.split("\n"):
toks = line.split("\t")
if len(toks) == 4 and toks[0] == "SN":
if toks[2] == "number of SNPs:":
num_snps = int(toks[3])
hits += 1
if toks[2] == "number of MNPs:":
num_mnps = int(toks[3])
hits += 1
elif toks[2] == "number of indels:":
num_indels = int(toks[3])
hits += 1
elif toks[2] == "number of records:":
hits += 1
num_records = int(toks[3])
elif toks[2] == "number of others:":
hits += 1
num_other = int(toks[3])
if hits == 5:
break
# generally, mnps are snps for our purposes (TODO: can separate?)
num_snps += num_mnps
return num_snps, num_indels, num_other
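# Minimal, self-contained sketch of the "SN" line parsing that vcf_num_records above relies
# on.  bcftools stats emits tab-separated summary lines such as
#   SN\t0\tnumber of SNPs:\t<count>
# The helper below is illustrative only (it is not called anywhere in this script) and the
# counts in the usage example are hypothetical.
def _example_parse_bcftools_sn(stats_text):
    """ collect the counts from example bcftools-stats "SN" lines """
    counts = {}
    for line in stats_text.split("\n"):
        toks = line.split("\t")
        if len(toks) == 4 and toks[0] == "SN":
            counts[toks[2]] = int(toks[3])
    return counts
# example (hypothetical numbers):
# _example_parse_bcftools_sn("SN\t0\tnumber of SNPs:\t100\nSN\t0\tnumber of indels:\t7")
# returns {"number of SNPs:": 100, "number of indels:": 7}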
def vcfeval_dist_fn(graph1, graph2, options):
out_dir = comp_path_vcfeval(graph1, graph2, options)
    ## use vcfeval's roc
if options.vroc is True:
roc_path = os.path.join(out_dir, "weighted_roc.tsv")
run("gzip -dc {} > {}".format(roc_path + ".gz", roc_path))
ret = []
with open(roc_path) as f:
for line in f:
if line[0] != "#" and len(line) > 0:
toks = line.split("\t")
precision, sensitivity = float(toks[4]), float(toks[5])
ret += [[0, 0, 0, 0, 0, 0, precision, sensitivity, toks[0]]]
return ret
# it's actually better to compute precision and recall from the vcf output
# because we can do clipping here and it won't break normalization and
# we can do a snp indel breakdown
fn_table, fp_table, tp_table = load_vcfeval_stats(out_dir)
assert len(fn_table) == len(fp_table) and len(fp_table) == len(tp_table)
rows = (0, len(fn_table)) if options.roc else (len(fn_table)-1, len(fn_table))
ret = []
for rownum in range(rows[0], rows[1]):
fns = fn_table[rownum][1]
fni = fn_table[rownum][2]
fno = fn_table[rownum][3]
fps = fp_table[rownum][1]
fpi = fp_table[rownum][2]
fpo = fp_table[rownum][3]
tps = tp_table[rownum][1]
tpi = tp_table[rownum][2]
tpo = tp_table[rownum][3]
fnt = fns + fni + fno
fpt = fps + fpi + fpo
tpt = tps + tpi + tpo
precs = 0. if tps + fps == 0 else float(tps) / float(tps + fps)
recs = 0. if tps + fns == 0 else float(tps) / float(tps + fns)
preci = 0. if tpi + fpi == 0 else float(tpi) / float(tpi + fpi)
reci = 0. if tpi + fni == 0 else float(tpi) / float(tpi + fni)
prect = 0. if tpt + fpt == 0 else float(tpt) / float(tpt + fpt)
rect = 0. if tpt + fnt == 0 else float(tpt) / float(tpt + fnt)
qual = fn_table[rownum][0]
assert qual == fp_table[rownum][0]
assert qual == tp_table[rownum][0]
        # shoehorn into vcfCompare style output (todo, revise this) + qual
ret += [[precs, recs, 0, 0, preci, reci, prect, rect, qual]]
return ret
def make_mat(options, row_graphs, column_graphs, dist_fns):
""" make a distance matix """
mat = []
for row in row_graphs:
mat.append([])
for col in column_graphs:
for dist_fn in dist_fns:
mat[-1].append(dist_fn(row, col))
return mat
def n_avg(row):
""" get average skipping Nones """
if not type(row) is list:
return row
a = [x for x in row if x is not None]
if len(a) == 0:
return None
else:
return float(sum(a)) / len(a)
def generate_combined_samples(options):
""" go through comparison output, and merge up all samples in --combine_samples
into a single dummy-sammple called COMBIEND, that will be treated as any
other sample downstream """
# only implemented for vcfeval
if options.comp_type == "vcfeval":
for region in options.sample_graphs.keys():
# pull out all graphs and baselines in the region
region_graphs = [g for g,b in options.pair_comps if options.tags[g][0] == region]
region_baselines = [b for g,b in options.pair_comps if options.tags[g][0] == region]
# will iterate over METHOD X BASELINE
method_set = set([options.tags[g][2] for g in region_graphs])
baseline_method_set = set([options.tags[b][2] for b in region_baselines])
for method in method_set:
graphs = [g for g in region_graphs if options.tags[g][2] == method]
for baseline_method in baseline_method_set:
baselines = [b for b in region_baselines if options.tags[b][2] == baseline_method]
generate_combined_vcfeval_sample(region, method, baseline_method, graphs, baselines, options)
def generate_combined_vcfeval_sample(region, method, baseline_method, graphs, baselines, options):
""" merge up a single set of graph results into one """
in_vcfs = [] # triple of (fn, fp, tp)
baseline = baselines[0]
sample_baseline = options.tags[baseline][1]
comb_samples = options.combine_samples.split(",")
tp_name = "tp-baseline" if options.tp_baseline is True else "tp"
# we are iterating over every sample for a given method/region, checking if we want to combine
    # and remembering its vcfeval output in in_vcfs
for graph in graphs:
out_vcfeval_dir = comp_path_vcfeval(graph, baseline, options, sample_override=options.combined_name)
gregion, sample, gmethod = options.tags[graph]
assert gregion == region and gmethod == method
        # scrape together all the vcfeval results
if sample in comb_samples:
in_vcfeval_dir = comp_path_vcfeval(graph, baseline, options, sample_override=sample)
results = (os.path.join(in_vcfeval_dir, "fn.vcf.gz"),
os.path.join(in_vcfeval_dir, "fp.vcf.gz"),
os.path.join(in_vcfeval_dir, "{}.vcf.gz".format(tp_name)))
if all(os.path.exists(x) for x in results):
in_vcfs.append(results)
else:
RealTimeLogger.get().warning("Missing vcfeval result for {}".format(graph))
# if we have a fn/fp/tp vcf for each sample to combine, then merge them
# all up into the COMBINED output directory
if len(in_vcfs) == len(comb_samples):
out_vcfs = (os.path.join(out_vcfeval_dir, "fn.vcf"),
os.path.join(out_vcfeval_dir, "fp.vcf"),
os.path.join(out_vcfeval_dir, "{}.vcf".format(tp_name)))
if options.overwrite or not all(os.path.isfile(ov + ".gz") for ov in out_vcfs):
RealTimeLogger.get().info("Making combined sample {}".format(out_vcfeval_dir))
# paste together all the vcfs and sort
robust_makedirs(out_vcfeval_dir)
for i in range(3):
run("bcftools view {} > {}".format(in_vcfs[0][i], out_vcfs[i]))
for j in range(1, len(in_vcfs)):
run("bcftools view -H {} >> {}".format(in_vcfs[j][i], out_vcfs[i]))
for i in range(3):
run("vcfsort {} > {}.sort".format(out_vcfs[i], out_vcfs[i]))
run("mv {}.sort {}".format(out_vcfs[i], out_vcfs[i]))
run("bgzip -f {}".format(out_vcfs[i]))
run("tabix -f -p vcf {}.gz".format(out_vcfs[i]))
# generate the roc tables.
# this is slow if clipping enabled, and would ideally get moved under toil
fn_table = vcf_qual_stats(out_vcfs[0] + ".gz", options.clip, ignore_keywords = ["OverlapConflict"])
fp_table = vcf_qual_stats(out_vcfs[1] + ".gz", options.clip_fp if options.clip_fp else options.clip)
tp_table = vcf_qual_stats(out_vcfs[2] + ".gz", options.clip)
save_vcfeval_stats(out_vcfeval_dir, fn_table, fp_table, tp_table)
# now we stick a new entry back in options tables so COMBINED gets iterated over
# in the table generation.
comb_graph = graph.replace(sample, options.combined_name)
assert comb_graph != graph
options.sample_graphs[region][options.combined_name].add(comb_graph)
options.tags[comb_graph] = (region, options.combined_name, method)
comb_baseline = baseline.replace(sample_baseline, options.combined_name)
assert comb_baseline != baseline
options.baseline_graphs[region][options.combined_name].add(comb_baseline)
options.tags[comb_baseline] = (region, options.combined_name, baseline_method)
else:
RealTimeLogger.get().warning("Failed to combine samples for {} vs {} in {}".format(method, baseline_method, region))
RealTimeLogger.get().warning("{}".format(str(in_vcfs)))
def make_tsvs(options):
""" make some tsv files in the output dir
"""
if options.comp_type == "kmer":
dist_names = ["Jaccard-Dist", "Precision", "Recall"]
dist_fns = [jaccard_dist_fn, precision_dist_fn, recall_dist_fn]
elif options.comp_type == "corg":
dist_names = ["Corg-Dist"]
dist_fns = [corg_dist_fn]
elif options.comp_type == "vcf":
dist_names = vcf_dist_header(options)
dist_fns = [vcf_dist_fn]
elif options.comp_type == "sompy":
dist_names = vcf_dist_header(options)
dist_fns = [sompy_dist_fn]
elif options.comp_type == "happy":
dist_names = vcf_dist_header(options)
dist_fns = [happy_dist_fn]
elif options.comp_type == "vcfeval":
dist_names = vcf_dist_header(options)
dist_fns = [vcfeval_dist_fn]
else:
assert False
# break apart by region
for region in options.sample_graphs.keys():
# do the baseline tsvs. this is one row per graph,
# with one column per comparison type with truth
#for baseline in ["g1kvcf", "platvcf"]:
for baseline in ["platvcf"]:
for sample in options.sample_graphs[region].keys():
header = dist_names
RealTimeLogger.get().info("Making {} baseline tsv for {} {}".format(baseline, region, sample))
mat = []
row_labels = []
for truth in options.baseline_graphs[region][sample]:
if options.tags[truth][2] == baseline:
for graph in options.sample_graphs[region][sample]:
rows = []
for d in dist_fns:
try:
dist_res = d(graph, truth, options)
except Exception as e:
RealTimeLogger.get().warning("Unable to retrieve distance between {} and {} because {}".format(graph, truth, e))
dist_res = [[None] * len(header)]
for ri, r in enumerate(dist_res):
if d == dist_fns[0]:
rows.append(r)
row_labels.append(options.tags[graph][2])
else:
rows[ri] += r
for row in rows:
mat.append(row)
break # shoud not be necessary
# write the baseline matrix (with None for missing data) to file
tsv_path = raw_tsv_path(options, region, baseline, options.comp_type, sample)
write_tsv(tsv_path, mat, header, row_labels, "Graph")
# remove Nones and write tsv again
clean_mat, clean_header, clean_row_labels = remove_nones(mat, header, row_labels)
tsv_path = out_tsv_path(options, region, baseline, options.comp_type, sample)
write_tsv(tsv_path, clean_mat, clean_header, clean_row_labels, "Graph")
# sample vs sample heatmap
if options.sample:
for di, d in enumerate(dist_fns):
RealTimeLogger.get().info("Making {} sample tsv for {}".format(dist_names[di], region))
sample_vs_sample = defaultdict(lambda : defaultdict(list))
for sample in options.sample_graphs[region].keys():
for graph1 in options.sample_graphs[region][sample]:
for graph2 in options.sample_graphs[region][sample]:
try:
dist_res = d(graph1, graph2, options)
except Exception as e:
RealTimeLogger.get().warning("Unable to retrieve distance between {} and {} because {}".format(graph1, graph2, e))
dist_res = [[None] * len(header)]
# only ever deal with 1 row here
assert len(dist_res) == 1
sample_vs_sample[options.tags[graph1][2]][options.tags[graph2][2]].append(dist_res[0][0])
#average over samples
for x in sample_vs_sample.keys():
for y in sample_vs_sample.keys():
sample_vs_sample[x][y] = n_avg(sample_vs_sample[x][y])
# make matrix
mat = []
header = sample_vs_sample.keys()
row_labels = sample_vs_sample.keys()
for x in row_labels:
row = []
for y in header:
row.append(sample_vs_sample[x][y])
mat.append(row)
# write the heatmap matrix (with None for missing data) to file
tsv_path = raw_tsv_path(options, region, "hm-sample", dist_names[di])
write_tsv(tsv_path, mat, header, row_labels, "Graph")
# remove Nones and write tsv again
clean_mat, clean_header, clean_row_labels = remove_nones(mat, header, row_labels)
tsv_path = out_tsv_path(options, region, "hm-sample", dist_names[di])
write_tsv(tsv_path, clean_mat, clean_header, clean_row_labels, "Graph")
# original vs original heatmap
if options.orig:
RealTimeLogger.get().info("Making {} orig tsv for {}".format(dist_names[di], region))
for di, d in enumerate(dist_fns):
orig_vs_orig = defaultdict(lambda : defaultdict(list))
for graph1 in options.orig_graphs[region]:
for graph2 in options.orig_graphs[region]:
try:
dist_res = d(graph1, graph2, options)
except Exception as e:
RealTimeLogger.get().warning("Unable to retrieve distance between {} and {} because {}".format(graph1, graph2, e))
dist_res = [[None] * len(header)]
# only ever deal with 1 row here
assert len(dist_res) == 1
orig_vs_orig[options.tags[graph1][2]][options.tags[graph2][2]] = dist_res[0][0]
# make matrix
mat = []
header = orig_vs_orig.keys()
row_labels = orig_vs_orig.keys()
for x in row_labels:
row = []
for y in header:
row.append(orig_vs_orig[x][y])
mat.append(row)
# write the heatmap matrix (with None for missing data) to file
tsv_path = raw_tsv_path(options, region, "hm-orig", dist_names[di])
write_tsv(tsv_path, mat, header, row_labels, "Graph")
# remove Nones and write tsv again
clean_mat, clean_header, clean_row_labels = remove_nones(mat, header, row_labels)
tsv_path = out_tsv_path(options, region, "hm-orig", dist_names[di])
write_tsv(tsv_path, clean_mat, clean_header, clean_row_labels, "Graph")
# sample vs original heatmap
if options.orig_and_sample:
for di, d in enumerate(dist_fns):
RealTimeLogger.get().info("Making {} sample tsv for {}".format(dist_names[di], region))
sample_vs_orig = defaultdict(lambda : defaultdict(list))
for sample in options.sample_graphs[region].keys():
for graph1 in options.sample_graphs[region][sample].union(options.orig_graphs[region]):
for graph2 in options.sample_graphs[region][sample].union(options.orig_graphs[region]):
try:
dist_res = d(graph1, graph2, options)
except Exception as e:
RealTimeLogger.get().warning("Unable to retrieve distance between {} and {} because {}".format(graph1, graph2, e))
dist_res = [[None] * len(header)]
# only ever deal with 1 row here
assert len(dist_res) == 1
name_1 = options.tags[graph1][2]
if graph1 in options.sample_graphs[region][sample]:
name_1 = "sample-" + name_1
else:
name_1 = "base-" + name_1
name_2 = options.tags[graph2][2]
if graph2 in options.sample_graphs[region][sample]:
name_2 = "sample-" + name_2
else:
name_2 = "base-" + name_2
sample_vs_orig[name_1][name_2].append(dist_res[0][0])
#average over samples
for x in sample_vs_orig.keys():
for y in sample_vs_orig.keys():
sample_vs_orig[x][y] = n_avg(sample_vs_orig[x][y])
# make matrix
mat = []
header = sample_vs_orig.keys()
row_labels = sample_vs_orig.keys()
for x in row_labels:
row = []
for y in header:
row.append(sample_vs_orig[x][y])
mat.append(row)
# write the heatmap matrix (with None for missing data) to file
tsv_path = raw_tsv_path(options, region, "hm-orig_and_sample", dist_names[di])
write_tsv(tsv_path, mat, header, row_labels, "Graph")
# remove Nones and write tsv again
clean_mat, clean_header, clean_row_labels = remove_nones(mat, header, row_labels)
tsv_path = out_tsv_path(options, region, "hm-orig_and_sample", dist_names[di])
write_tsv(tsv_path, clean_mat, clean_header, clean_row_labels, "Graph")
def write_tsv(out_path, mat, col_names, row_names, row_label):
""" write tsv distance matrx
"""
if len(mat) == 0 or len(mat[0]) == 0:
RealTimeLogger.get().warning("Unable to write {} because input matrix empty".format(out_path))
return
robust_makedirs(os.path.dirname(out_path))
with open(out_path, "w") as f:
# header
f.write("{}\t".format(row_label) + "\t".join(col_names) + "\n")
for i, row_name in enumerate(row_names):
f.write(row_name)
for j, col_name in enumerate(col_names):
f.write("\t{}".format(mat[i][j]))
f.write("\n")
def read_tsv(in_path):
""" opposite of above
"""
with open(in_path) as f:
# header
line = f.readline()
toks = line[:-1].split("\t")
row_label = toks[0]
col_names = toks[1:]
row_names = []
# body
mat = []
for line in f:
toks = line[:-1].split("\t")
row_names.append(toks[0])
toks = map(lambda x : None if x == "None" else float(x), toks[1:])
mat.append(toks)
return mat, col_names, row_names, row_label
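# Note (hypothetical values): write_tsv/read_tsv above use a plain tab-separated layout
# with a single header row, for example
#   Graph\tSNP-Precision\tSNP-Recall\t...
#   snp1kg\t0.91\t0.87\t...
# read_tsv maps the string "None" back to Python None so that remove_nones below can
# prune rows and columns with missing data.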
def remove_nones(mat, col_names, row_names):
""" Naive greedy remove of rows and columns with None elements.
idea find row or column with most Nones. remove it. repeat.
haven't given this too much thought.
"""
keep_going = True
while keep_going is True:
if len(row_names) == 0:
break
assert len(mat) == len(row_names)
row_counts = [0 for x in range(len(row_names))]
col_counts = [0 for x in range(len(col_names))]
# could be moved outside loop but that'd be too clever
for i in range(len(row_names)):
for j in range(len(col_names)):
if mat[i][j] == None:
row_counts[i] += 1
col_counts[j] += 1
row_max = max(row_counts)
col_max = max(col_counts)
# normalize by length
row_frac_max = float(row_max) / float(len(col_counts))
col_frac_max = float(col_max) / float(len(row_counts))
if row_max > 0 and row_frac_max >= col_frac_max:
idx = row_counts.index(row_max)
del mat[idx]
del row_names[idx]
elif col_frac_max > row_frac_max:
idx = col_counts.index(col_max)
for i in range(len(row_names)):
del mat[i][idx]
del col_names[idx]
else:
keep_going = False
return mat, col_names, row_names
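# Minimal, self-contained sketch of remove_nones on a toy matrix (illustrative only, not
# called anywhere in this script).  Column "y" has the highest fraction of Nones, so it is
# pruned, leaving a complete 3x2 matrix.
def _example_remove_nones():
    mat = [[1.0, None, 2.0],
           [3.0, None, 4.0],
           [5.0, 6.0, 7.0]]
    cols = ["x", "y", "z"]
    rows = ["a", "b", "c"]
    return remove_nones(mat, cols, rows)
# expected result: ([[1.0, 2.0], [3.0, 4.0], [5.0, 7.0]], ["x", "z"], ["a", "b", "c"])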
def compute_kmer_comparison(job, graph1, graph2, options):
""" run vg compare between two graphs
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
out_path = comp_path(graph1, graph2, options)
graph1_index_path = index_path(graph1, options)
assert os.path.exists(graph1_index_path)
graph2_index_path = index_path(graph2, options)
assert os.path.exists(graph2_index_path)
do_comp = options.overwrite or not os.path.exists(out_path)
if do_comp:
if os.path.isfile(out_path):
os.remove(out_path)
robust_makedirs(os.path.dirname(out_path))
run("vg compare {} {} -i -t {} > {}".format(graph1, graph2,
min(options.vg_cores, 2), out_path))
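# Note on the function below: the corg distance it writes is
#   abs(1 - 2 * len(corg graph) / (len(graph1) + len(graph2)))
# so it is 0 when the merged corg graph is exactly the average length of the two inputs,
# and it grows as the merge inflates or collapses relative to them.  A zero-length corg
# graph is treated as an error (see the .log file written next to the output).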
def compute_corg_comparison(job, graph1, graph2, options):
""" run corg on the graphs. store the output in a text file
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
out_path = corg_path(graph1, graph2, options)
corg_vg = corg_graph_path(graph1, graph2, options)
do_comp = options.overwrite or not os.path.exists(out_path)
if do_comp:
if os.path.isfile(out_path):
os.remove(out_path)
robust_makedirs(os.path.dirname(out_path))
run("corg {} {} --kmers-only -e {} -k {} -t {} > {} 2> {}".format(graph1, graph2, options.edge_max,
options.kmer, options.vg_cores, corg_vg,
out_path.replace(".txt", ".log")),
timeout_sec=options.timeout,
timeout_dep=out_path)
len1 = vg_length(graph1, options)
len2 = vg_length(graph2, options)
lenC = vg_length(corg_vg, options)
# corg screwing up will leave an empty vg which gives length 0
if lenC == 0:
corg_val = "error: corg graph not computed. see .log"
else:
corg_val = abs(1. - (2. * lenC) / float(len1 + len2))
with open(out_path, "w") as f:
f.write("{}\n".format(corg_val))
def preprocess_vcf(job, graph, options):
""" run vt normalize and bed clip"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
input_vcf = input_vcf_path(graph, options)
output_vcf = preprocessed_vcf_path(graph, options)
robust_makedirs(os.path.dirname(output_vcf))
run("scripts/vcfsort {} > {}".format(input_vcf, output_vcf), fail_hard=True)
if options.qpct is not None and (options.tags[graph][2] in ["gatk3", "platypus", "freebayes", "samtools"] or
(options.tags[graph][2] == "g1kvcf" and options.baseline != "g1kvcf") or
options.qgraph is True):
# g1kvcf has no quality info. proxy with read depth to at least get a curve
#filter_opts = "--info DP" if options.tags[graph][2] == "g1kvcf" else ""
if "platvcf" not in options.tags[graph][2]:
filter_opts = ""
vcfFQcmd = "cat {} | ".format(output_vcf)
if options.tags[graph][2] not in ["gatk3", "platypus", "freebayes", "samtools", "g1kvcf", "platvcf", "platvcf-baseline"]:
#filter_opts += " --info DP"
filter_opts += " --{}".format(options.filter_type) if not options.new else " --ad"
if options.dedupe:
filter_opts += " --dedupe"
vcfFQcmd += "scripts/vcfFilterQuality.py - {} --pct {} --set_qual > {}".format(options.qpct,
filter_opts,
output_vcf + ".qpct")
run(vcfFQcmd)
run("cp {} {}".format(output_vcf + ".qpct", output_vcf))
if options.normalize is True:# and options.tags[graph][2] not in ["gatk3", "platypus", "freebayes", "samtools", "g1kvcf", "platvcf", "platvcf-baseline"]:
# run blocksub (only on single allelic)
sts = run("bcftools filter -e \"N_ALT > 1\" {} | vt decompose_blocksub -a - > {}".format(
output_vcf,
output_vcf + ".vt"))
# add back multiallelic variants
run("bcftools filter -i \"N_ALT > 1\" {} | bcftools view - -H >> {}".format(
output_vcf,
output_vcf + ".vt"))
if sts != 0:
run("rm {}".format(output_vcf))
return
run("cp {} {}".format(output_vcf + ".vt", output_vcf))
if options.clip is not None and options.comp_type != "vcfeval":
clip_bed = clip_bed_path(graph, options)
if not os.path.isfile(clip_bed):
RealTimeLogger.get().warning("Clip bed file not found {}".format(clip_bed))
else:
run("bgzip {}".format(output_vcf), fail_hard=True)
run("tabix -f -p vcf {}".format(output_vcf + ".gz"), fail_hard=True)
run("bcftools view {} -R {} > {}".format(output_vcf + ".gz", clip_bed, output_vcf), fail_hard=True)
run("rm {}".format(output_vcf + ".gz*"))
run("scripts/vcfsort {} | vcfuniq > {}".format(output_vcf, output_vcf + ".sort"), fail_hard=True)
run("cp {} {}".format(output_vcf + ".sort", output_vcf), fail_hard=True)
# one final sort, and strip ignored variants
ig_opts = ""
for ignore_keyword in options.ignore:
ig_opts += " | grep -v {}".format(ignore_keyword)
# also strip genotypes
if not options.gt and options.comp_type != "vcfeval":
ig_opts += " | scripts/vcfSetGenotypes.py -"
run("mv {} {} ; scripts/vcfsort {} {} | vcfuniq > {}".format(output_vcf, output_vcf + ".ig",
output_vcf + ".ig", ig_opts,
output_vcf), fail_hard=True)
# need compressed index for vcfeval
run("bgzip {} -c > {}".format(output_vcf, output_vcf + ".gz"), fail_hard=True)
run("tabix -f -p vcf {}".format(output_vcf + ".gz"), fail_hard=True)
def compute_vcf_comparison(job, graph1, graph2, options):
""" run vcf compare between two graphs
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
if options.comp_type == "sompy":
out_path = comp_path_sompy(graph1, graph2, options)
elif options.comp_type == "happy":
out_path = comp_path_happy(graph1, graph2, options)
elif options.comp_type == "vcfeval":
out_path = comp_path_vcfeval(graph1, graph2, options)
else:
out_path = comp_path_vcf(graph1, graph2, options)
# we expect graph1 to be a sample graph
region1, sample1, method1 = options.tags[graph1]
assert method1 != "None" and "platvcf" not in method1
# we expect graph2 to be a baseline graph
region2, sample2, method2 = options.tags[graph2]
assert method2 in ["g1kvcf", "platvcf"]
assert region1 == region2
assert sample1 == sample2
# get the vcf of the sample graph
query_vcf_path = preprocessed_vcf_path(graph1, options)
# and the baseline
truth_vcf_path = preprocessed_vcf_path(graph2, options)
do_comp = options.overwrite or not os.path.exists(out_path)
if do_comp:
if os.path.isfile(out_path):
os.remove(out_path)
robust_makedirs(os.path.dirname(out_path))
if options.comp_type == "sompy":
sp_opts = "-P"
if options.clip_fp:
sp_opts += " -f {}".format(options.clip_fp)
# doesn't seem to work
#if options.clip:
# sp_opts += " -R {}".format(options.clip)
run("export HGREF={} ; som.py {} {} --output {} {} 2> {}".format(options.chrom_fa_path, truth_vcf_path,
query_vcf_path, out_path.replace(".stats.csv", ""),
sp_opts, out_path + ".stderr"))
elif options.comp_type == "happy":
hp_opts = ""
            # can't figure out how to get vcfeval working in happy. leave off for now
#hp_opts += "--engine vcfeval --engine-vcfeval-path rtg --engine-vcfeval-template {}".format(options.chrom_sdf_path)
if options.clip_fp:
hp_opts += " -f {}".format(options.clip_fp)
# doesn't seem to work
#if options.clip:
# hp_opts += " -R {}".format(options.clip)
# make roc curves for gatk and platypus (hardcoding name check as hack for now)
if method1 in ["gatk3", "platypus", "g1kvcf", "freebayes", "samtools"] and options.roc is True:
hp_opts += " -V --roc Q_GQ --roc-filter LowGQX"
# since we use just numbers for chrom names in the vcf, use options.happy_fa_path as a hack to make happy happy.
run("export HGREF={} ; hap.py {} {} -o {} --threads {} {} 2> {}".format(options.happy_fa_path, truth_vcf_path,
query_vcf_path, out_path.replace(".summary.csv", ""),
options.vg_cores, hp_opts, out_path + ".stderr"))
elif options.comp_type == "vcf":
vc_opts = ""
for ignore_keyword in options.ignore:
vc_opts += " -i {}".format(ignore_keyword)
run("scripts/vcfCompare.py {} {} {} > {}".format(query_vcf_path, truth_vcf_path, vc_opts, out_path))
elif options.comp_type == "vcfeval":
ve_opts = "" if options.gt else "--squash-ploidy"
# doesn't seem to work
#if options.clip:
# ve_opts += " --bed-regions={}".format(options.clip)
# indexing and compression was done by preprocessing phase
run("rm -rf {}".format(out_path))
run("rtg vcfeval -b {}.gz -c {}.gz --all-records --ref-overlap --vcf-score-field QUAL -t {} {} -o {}".format(truth_vcf_path, query_vcf_path,
options.chrom_sdf_path, ve_opts,
out_path))
tp_name = "tp-baseline" if options.tp_baseline is True else "tp"
# count up output
fn_path = os.path.join(out_path, "fn.vcf.gz")
fp_path = os.path.join(out_path, "fp.vcf.gz")
tp_path = os.path.join(out_path, "{}.vcf.gz".format(tp_name))
try:
fn_table = vcf_qual_stats(fn_path, options.clip, ignore_keywords = ["OverlapConflict"])
fp_table = vcf_qual_stats(fp_path, options.clip_fp if options.clip_fp else options.clip)
tp_table = vcf_qual_stats(tp_path, options.clip)
save_vcfeval_stats(out_path, fn_table, fp_table, tp_table)
except:
pass
def compute_kmer_comparisons(job, options):
""" run vg compare in parallel on all the graphs,
outputting a json file for each
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
RealTimeLogger.get().info("Running vg compare on {} pairs of input graphs".format(
len(options.pair_comps)))
for pair_comp in options.pair_comps:
graph1, graph2 = pair_comp[0], pair_comp[1]
out_path = comp_path(graph1, graph2, options)
if options.overwrite or not os.path.exists(out_path):
job.addChildJobFn(compute_kmer_comparison, graph1, graph2, options,
cores=min(options.vg_cores, 2))
def compute_corg_comparisons(job, options):
""" run corg compare on all corg-ablegraphs.
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
RealTimeLogger.get().info("Running corg comparison {} pairs of input graphs".format(
len(options.pair_comps)))
for pair_comp in options.pair_comps:
graph1, graph2 = pair_comp[0], pair_comp[1]
out_path = corg_path(graph1, graph2, options)
if options.overwrite or not os.path.exists(out_path):
job.addChildJobFn(compute_corg_comparison, graph1, graph2, options,
cores=options.vg_cores)
def compute_vcf_comparisons(job, options):
""" run vg compare in parallel on all the graphs,
outputting a json file for each
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
RealTimeLogger.get().info("Running vcf comparison {} pairs of input graphs".format(
len(options.pair_comps)))
for pair_comp in options.pair_comps:
graph1, graph2 = pair_comp[0], pair_comp[1]
out_path = comp_path_vcf(graph1, graph2, options)
if options.overwrite or not os.path.exists(out_path):
cores = options.vg_cores if options.comp_type == "happy" else 1
job.addChildJobFn(compute_vcf_comparison, graph1, graph2, options,
cores=cores)
def compute_kmer_indexes(job, options):
""" run everything (root toil job)
first all indexes are computed,
then all comparisons (follow on)
then summary (follow on of that)
"""
# Move to the appropriate working directory from wherever Toil dropped us
os.chdir(options.cwd)
# do all the indexes
input_set = set()
for pair_comp in options.pair_comps:
input_set.add(pair_comp[0])
input_set.add(pair_comp[1])
if options.comp_type in ["kmer", "corg"]:
RealTimeLogger.get().info("Computing indexes for {} input graphs".format(len(input_set)))
for graph in input_set:
if options.overwrite or not os.path.exists(index_path(graph, options)):
job.addChildJobFn(compute_kmer_index, graph, options, cores=options.vg_cores)
if options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
RealTimeLogger.get().info("Preprocessing {} input vcfs".format(len(input_set)))
for graph in input_set:
if options.overwrite or not os.path.isfile(preprocessed_vcf_path(graph, options)):
job.addChildJobFn(preprocess_vcf, graph, options, cores=1)
# do the comparisons
if options.comp_type == "kmer":
job.addFollowOnJobFn(compute_kmer_comparisons, options, cores=1)
elif options.comp_type == "corg":
job.addFollowOnJobFn(compute_corg_comparisons, options, cores=1)
elif options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
job.addFollowOnJobFn(compute_vcf_comparisons, options, cores=1)
def breakdown_gams(in_gams, orig, orig_and_sample, options):
""" use callVariants methods to find all the relevant graphs given
a list of input gams"""
# sort through the input, and make some dictionaries splitting
# up the different vg files by region, sample, algorithm.
orig_graphs = defaultdict(set)
sample_graphs = defaultdict(defaultdict_set)
baseline_graphs = defaultdict(defaultdict_set)
#other direction
tags = dict()
# hack in dummy files for total vcfs
total_gams = set()
if options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
for input_gam in in_gams:
region = alignment_region_tag(input_gam, options)
# dangerous :: fix
dummy_gam = input_gam.replace(region, "total")
total_gams.add(dummy_gam)
for input_gam in in_gams + list(total_gams):
region = alignment_region_tag(input_gam, options)
sample = alignment_sample_tag(input_gam, options)
method = alignment_graph_tag(input_gam, options)
orig_path = graph_path(input_gam, options)
sample_path = sample_vg_path(input_gam, options)
g1kvcf_path = g1k_vg_path(input_gam, False, False, options)
platvcf_path = g1k_vg_path(input_gam, True, False, options)
if orig or orig_and_sample:
orig_graphs[region].add(orig_path)
tags[orig_path] = (region, None, method)
def test_path(graph, method):
if options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
return input_vcf_path(graph, options, region, sample, method)
return graph
if os.path.isfile(test_path(sample_path, method)):
sample_graphs[region][sample].add(sample_path)
else:
sys.stderr.write("WARNING, input VCF not found: {}\n".format(
test_path(sample_path, method)))
        # we don't expect to have baselines for every sample
if options.baseline == "g1kvcf" and os.path.isfile(test_path(g1kvcf_path, "g1kvcf")):
baseline_graphs[region][sample].add(g1kvcf_path)
if options.baseline == "platvcf" and os.path.isfile(test_path(platvcf_path, "platvcf")):
baseline_graphs[region][sample].add(platvcf_path)
# add external vcfs as sample graphs, rely on tags to tell them apart
if options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
gatk3_path = input_vcf_path(None, options, region, sample, "gatk3")
if os.path.isfile(gatk3_path):
sample_graphs[region][sample].add(gatk3_path)
tags[gatk3_path] = (region, sample, "gatk3")
platypus_path = input_vcf_path(None, options, region, sample, "platypus")
if os.path.isfile(platypus_path):
sample_graphs[region][sample].add(platypus_path)
tags[platypus_path] = (region, sample, "platypus")
freebayes_path = input_vcf_path(None, options, region, sample, "freebayes")
if os.path.isfile(freebayes_path):
sample_graphs[region][sample].add(freebayes_path)
tags[freebayes_path] = (region, sample, "freebayes")
samtools_path = input_vcf_path(None, options, region, sample, "samtools")
if os.path.isfile(samtools_path):
sample_graphs[region][sample].add(samtools_path)
tags[samtools_path] = (region, sample, "samtools")
#if options.baseline != "g1kvcf" and os.path.isfile(test_path(g1kvcf_path, "g1kvcf")):
# sample_graphs[region][sample].add(g1kvcf_path)
tags[sample_path] = (region, sample, method)
tags[g1kvcf_path] = (region, sample, "g1kvcf")
tags[platvcf_path] = (region, sample, "platvcf")
return orig_graphs, sample_graphs, baseline_graphs, tags
def main(args):
options = parse_args(args)
assert options.comp_type in ["corg", "kmer", "vcf", "sompy", "happy", "vcfeval"]
if options.comp_type in ["vcf", "sompy", "happy", "vcfeval"]:
assert not options.orig and not options.orig_and_sample
if options.orig_and_sample:
options.orig = True
options.sample = True
if options.comp_type in ["vcfeval"]:
options.chrom_sdf_path = os.path.join(options.comp_dir, "chrom.sdf")
if options.overwrite or not os.path.exists(options.chrom_sdf_path):
# need to reformat the fasta as sdf for vcfeval
run("rm -rf {}; rtg format {} -o {}".format(
options.chrom_sdf_path, options.chrom_fa_path, options.chrom_sdf_path))
RealTimeLogger.start_master()
# since we re-use callVariants methods
options.out_dir = options.var_dir
# find all the graphs
# clean out shell artifacts
in_gams = []
for gam in options.in_gams:
if os.path.isfile(gam):
in_gams.append(gam)
else:
RealTimeLogger.get().warning("skipping {}".format(gam))
options.in_gams = in_gams
breakdown = breakdown_gams(options.in_gams, options.orig,
options.orig_and_sample, options)
options.orig_graphs = breakdown[0]
options.sample_graphs = breakdown[1]
options.baseline_graphs = breakdown[2]
options.tags = breakdown[3]
options.pair_comps = []
    # determine all pairwise comparisons between original graphs
for region in options.orig_graphs.keys():
for graph1 in options.orig_graphs[region]:
for graph2 in options.orig_graphs[region]:
if graph1 <= graph2:
options.pair_comps.append((graph1, graph2))
# optional original vs sample
if options.orig_and_sample:
for sample in options.sample_graphs[region].keys():
for graph2 in options.sample_graphs[region][sample]:
options.pair_comps.append((graph1, graph2))
# now all sample vs baseline comparisons
for region in options.sample_graphs.keys():
for sample in options.sample_graphs[region].keys():
for graph1 in options.sample_graphs[region][sample]:
for graph2 in options.baseline_graphs[region][sample]:
# skip g1kvcf vs g1kvcf comparison
if options.tags[graph1] != options.tags[graph2]:
options.pair_comps.append((graph1, graph2))
                # optional sample vs sample
if options.sample:
for graph2 in options.sample_graphs[region][sample]:
if graph1 <= graph2:
options.pair_comps.append((graph1, graph2))
# Make a root job
root_job = Job.wrapJobFn(compute_kmer_indexes, options,
cores=1, memory="2G", disk=0)
# Run it and see how many jobs fail
Job.Runner.startToil(root_job, options)
# combine up some output across all samples, making a dummy
# combined sample for make_tsvs to iterate over
if options.combine_samples is not None:
generate_combined_samples(options)
# munge through results to make a matrix (saved to tsv file)
make_tsvs(options)
RealTimeLogger.stop_master()
if __name__ == "__main__" :
sys.exit(main(sys.argv))
|
adamnovak/hgvm-graph-bakeoff-evalutations
|
scripts/computeVariantsDistances.py
|
Python
|
mit
| 67,700 | 0.005628 |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Python 2<->3 compatibility module"""
import sys
def print_(template, *args, **kwargs):
template = str(template)
if args:
template = template % args
elif kwargs:
template = template % kwargs
sys.stdout.writelines(template)
if sys.version_info < (3, 0):
basestring = basestring
from ConfigParser import ConfigParser
from urllib import unquote
iteritems = lambda d: d.iteritems()
dictkeys = lambda d: d.keys()
def reraise(t, e, tb):
exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
basestring = str
from configparser import ConfigParser
from urllib.parse import unquote
iteritems = lambda d: d.items()
dictkeys = lambda d: list(d.keys())
def reraise(t, e, tb):
raise e.with_traceback(tb)
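# Minimal illustration of how the shims above keep calling code version-agnostic; the
# settings dict below is hypothetical and this helper is not used by the package itself.
def _example_usage():
    settings = {'use_threadpool': 'true'}
    for key, value in iteritems(settings):
        print_("%s = %s\n", key, value)
    # reraise(t, e, tb) can likewise be used inside an except block to re-raise an
    # exception with its original traceback on both Python 2 and 3.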
|
grepme/CMPUT410Lab01
|
virt_env/virt1/lib/python2.7/site-packages/PasteDeploy-1.5.2-py2.7.egg/paste/deploy/compat.py
|
Python
|
apache-2.0
| 961 | 0.007284 |
#How to run this:
#Python libraries needed to run this file: Flask, Git Python, SQLAlchemy
#You will need to have Git installed, and it will need to be in your path.
#For example, on Windows you should be able to run a command like 'git pull' from the
#ordinary Windows command prompt and not just from Git Bash.
#You will need a MySQL server with the MSR14 datasource or other GHTorrent database with the same schema.
#Edit the line in this code that says db = sqlalchemy.create_engine to match your username:password@hostname:port/database.
#This file is hardcoded to download the ghdata repository.
#Since it is a preliminary example, each time it runs,
#it deletes the local ghdata repo and re-downloads it (though this might not be a good option for the future).
#Because of this: if you have a folder named ghdata whose contents you do not want deleted,
#do not place this file in the same folder as your ghdata folder.
#to run this, type "python pythonBlameHistoryTree.py" into the command prompt
#You will see some output about running on 127.0.0.1:5000 in the command prompt
#Open a web browser and navigate to 127.0.0.1:5000.
#This page will load for quite a while. At least several minutes is expected.
#You can see it is still running due to the testing output in the command prompt ("Outer loop: <commit>" / "Inner loop: <commit>")
#When the testing output stops running you should see some output in the browser tab.
#the output shows the commit number and date, the total lines of code and other files (for example, the readme)
#and the percentage written by each organization.
#expected output for ghdata should show only the spdx-tools organization (Matt is a member)
#Number of lines corresponds to the lines written by Matt.
#You can see that earlier commits are lower on the page, and chronologically later ones appear higher up.
#An "error" I expect us to encounter when testing other repos:
#The way my sql query works right now, a user can be a member of multiple organizations.
#For a simple case of expected output problems:
#User1 wrote the entire repository (100%)
#User1 is a member of Microsoft and IBM
#Microsoft wrote 100% of the repository. IBM also wrote 100% of the repository for a total of 200%
#Other issues:
#If a user does not have both an email and an organization available in the GHTorrent database,
#the user will not be counted towards any organization.
#Future changes planned for this file:
#Code cleanup for better readability
#Code commenting for each portion
#Thorough testing for various potential cases we might encounter
#Deciding for certain how to decide whether a user is a member of an organization
#A better method of dealing with local repository rather than deleting each time and re-downloading
#Not having the database password directly in the code
#Look into improving code efficiency where possible for faster runtime
from flask import Flask
from git import *
import sqlalchemy
from sqlalchemy import text
import shutil
import os
import stat
import time
app = Flask(__name__)
@app.route("/")
def pythonBlameHistory():
#path is the hardcoded folder for the last download of ghdata
repo_path = './ghdata'
#We must remove the old ghdata if we want to download a new copy.
#In order to delete it, we must first change the permissions
#To be writable for all files and directories.
#Based on this: http://stackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions
if os.path.exists(repo_path):
for root, directories, files in os.walk(repo_path):
for directory in directories:
os.chmod(os.path.join(root, directory), stat.S_IWRITE)
for file in files:
os.chmod(os.path.join(root, file), stat.S_IWRITE)
os.chmod(repo_path, stat.S_IWRITE)
#delete the old ghdata
shutil.rmtree(repo_path)
#connect to the database username:password@hostname:port/databasename
db = sqlalchemy.create_engine('mysql+pymysql://root:password@localhost:3306/msr14')
schema = sqlalchemy.MetaData()
schema.reflect(bind=db)
#Get the ghdata repository from GitHub
repo = Repo.init('ghdata')
origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git')
origin.fetch()
origin.pull(origin.refs[0].remote_head)
#Dictionary to store results of sql queries
#associating emails with organizations.
#Without this dictionary, we would have to repeat
#the same query over and over, which on my local machine
#meant a runtime of over 24 hours (as opposed to several minutes using the dictionary)
orgs_associated_with_user = {}
#This dictionary keeps track of the lines written per organization for a single file.
lines_per_organization_per_file = {}
#This is the total number of lines in a single file
total_lines_in_file = 0
#this is used later to hold percentage results for output
percentage = 0
#This is the total number of lines in an entire repo
total_lines_in_repo = 0
#This dictionary keeps track of the lines written per organization for the entire repo.
lines_per_organization_entire_repo = {}
#The output string will be displayed to the screen once everything is done running.
outputString = ""
#Outer loop: loop through each commit in the master branch.
#This corresponds to the history of commits over time.
for history_commit in repo.iter_commits('master'):
#Since we want to see the change over time in repo percentage by organization,
#clear the variables for total lines and organization lines for each new commit
#we examine.
lines_per_organization_entire_repo = {}
total_lines_in_repo = 0
#Testing output: only purpose is to show you it's still running :)
print("Outer loop: " + str(history_commit))
#Now loop through every file in the repo.
#You cannot use the os library file/directory loop for this part.
#(as was used above to change file permissions)
#That is because some files do not exist in every commit.
#You must loop through the commit tree, not the ghdata directory.
for file_in_repo in history_commit.tree.traverse():
#For each file, we want to clear out the total lines and organization totals per file.
#That's because we're starting over with a new file.
lines_per_organization_per_file = {}
total_lines_in_file = 0
#Files are of the blob type. This if statement prevents us from trying
#to examine 'lines' in a directory.
if file_in_repo.type == 'blob':
#Now for each file, perform git blame. This will traverse
#the lines in the file.
#You can see there are now two variables of type commit:
#history_commit and blame_commit (will improve variable naming in a future update)
#history_commit is the commit with respect to the overall repo history.
#blame_commit is the commit in which this line was most recently changed
#as obtained through git blame. We use the "blame_commit" variable
#to obtain the author of the commit for when the lines were last changed.
for blame_commit, lines in repo.blame(history_commit, file_in_repo.path):
#Git blame does not always return one line at a time.
#Sometimes we are returned several lines committed by the same author.
#In that case, we must count how many lines there are or our
#total will not match the actual file.
blameLineCount = 0
for line in lines:
#increment lines to later attribute to an organization.
blameLineCount += 1
#increment lines in the file as a whole
total_lines_in_file += 1
#Testing output: only shows that things are still running.
print("Inner loop: " + str(blame_commit))
#Get the email address of the author of this commit.
#If we already have it in our dictionary, increase the total
#lines for the associated organization by blameLineCount
if blame_commit.author.email in orgs_associated_with_user:
for organization in orgs_associated_with_user[blame_commit.author.email]:
if organization not in lines_per_organization_per_file:
lines_per_organization_per_file[organization] = blameLineCount
else:
lines_per_organization_per_file[organization] += blameLineCount
#If the email address is not in our dictionary, we must query
#the database to get any associated organizations.
else:
sql = text('select orgUser.login as org_name '
'from users as thisUser join organization_members '
'on organization_members.user_id = thisUser.id '
'join users as orgUser on organization_members.org_id = orgUser.id '
'where thisUser.email = "' + blame_commit.author.email + '"')
result = db.engine.execute(sql)
#add the email to the dictionary
orgs_associated_with_user[blame_commit.author.email] = []
#if there are organizations in the result, associate those organizations with the
#user email in the dictionary.
#Then, set or add blameLineCount to the organization total.
for organization_row in result:
orgs_associated_with_user[blame_commit.author.email] = orgs_associated_with_user[blame_commit.author.email] + [organization_row[0]]
if organization_row[0] not in lines_per_organization_per_file:
lines_per_organization_per_file[organization_row[0]] = blameLineCount
else:
lines_per_organization_per_file[organization_row[0]] += blameLineCount
#If there is at least one line in this file
if total_lines_in_file > 0:
#Add the total lines in this file to the total lines in the repo.
total_lines_in_repo += total_lines_in_file
#Loop through the organization total lines for this file.
#Add each organization to the repo's organization total lines.
for organization in lines_per_organization_per_file:
if organization not in lines_per_organization_entire_repo:
lines_per_organization_entire_repo[organization] = lines_per_organization_per_file[organization]
else:
lines_per_organization_entire_repo[organization] += lines_per_organization_per_file[organization]
#Calculate the percentage for this file by organization (no longer used: former testing output)
percentage = lines_per_organization_per_file[organization] / total_lines_in_file * 100
#Construct output for this commit. First output the commit, date, and total lines in the repo.
outputString = outputString + "REPO TOTALS FOR COMMIT: " + str(history_commit) + " authored at " + time.strftime("%I:%M %p, %b %d, %Y", time.gmtime(history_commit.authored_date)) + " <br>"
outputString = outputString + "TOTAL REPO LINES: " + str(total_lines_in_repo) + "<br>"
#Now loop through the organizations and calculate the percentage of the repo for each.
#Output a line for each organization showing organization name, lines from that organization, percentage of the file
for organization in lines_per_organization_entire_repo:
percentage = lines_per_organization_entire_repo[organization] / total_lines_in_repo * 100
outputString = outputString + " ORGANIZATION: " + str(organization) + " ORG TOTAL LINES: " + str(lines_per_organization_entire_repo[organization]) + " PERCENTAGE OF REPO: " + str(percentage) + "%<br>"
#Output line between each commit in the history for easier legibility.
outputString = outputString + "----------------------------------------------------------------------------<br>"
#Show the outputString in the browser.
return outputString
if __name__ == "__main__":
app.run()
|
Hackers-To-Engineers/ghdata-sprint1team-2
|
organizationHistory/pythonBlameHistoryTree.py
|
Python
|
mit
| 12,918 | 0.014476 |
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
spdx/tools-python
|
spdx/parsers/__init__.py
|
Python
|
apache-2.0
| 577 | 0 |
#!/usr/bin/env python
# -*- coding: ascii -*-
from subprocess import Popen, PIPE
import threading
import select
import logging
import fcntl
import time
import sys
import os
TTY_OPTS="-icrnl -onlcr -imaxbel -opost -isig -icanon -echo line 0 kill ^H min 100 time 2 brkint 115200"
READERS = []
WRITERS = []
SELECT_TO = 0.1
def tty_set_opts(dev, opts):
"""Set tty options"""
cmd = ["stty", "-F", dev] + opts.split(" ")
prcs = Popen(cmd, stdout=PIPE,stderr=PIPE)
out, err = prcs.communicate()
if out:
logging.info(out)
if err:
logging.error(err)
return prcs.returncode
class TTYWorker(threading.Thread):
def __init__(self, dev, root):
threading.Thread.__init__(self)
self.tty = os.path.basename(dev)
self.dev = dev
self.root = root
self.keep_running = True
def stop(self):
self.keep_running = False
def run(self):
raise Exception("Not implemented")
class TTYReader(TTYWorker):
"""Reads tty output to file"""
def run(self):
tty_out_path = os.sep.join([self.root, "%s.log" % self.tty])
logging.info("tty_out_path(%s)" % tty_out_path)
while self.keep_running:
err = not os.path.exists(self.dev)
if err:
logging.error("dev(%s) does not exist" % self.dev)
time.sleep(1)
continue
err = not os.path.exists(self.root)
if err:
logging.error("root(%s) does not exist" % self.root)
time.sleep(1)
continue
err = tty_set_opts(self.dev, TTY_OPTS)
if err:
logging.error("failed stty err(%d)", err)
time.sleep(1)
continue
try:
with open(self.dev, "rb", 0) as dev_r, \
open(tty_out_path, "ab", 0) as tty_out:
while self.keep_running and \
os.fstat(dev_r.fileno()).st_nlink and \
os.fstat(tty_out.fileno()).st_nlink:
ready, _, _ = select.select(
[dev_r.fileno()], [], [], SELECT_TO
)
if not ready:
continue
logging.debug("dev_r.read(1)")
payload = dev_r.read(1)
logging.debug("dev_r.read(1) -- DONE")
if payload is None:
break
logging.debug("tty_out.write")
tty_out.write(payload)
logging.debug("tty_out.write -- DONE")
            except Exception:
logging.error("error(%s)" % str(sys.exc_info()))
class TTYWriter(TTYWorker):
"""Write commands to tty"""
def run(self):
tty_in_path = os.sep.join([self.root, "%s.in" % self.tty])
logging.info("tty_in(%s)" % tty_in_path)
while self.keep_running:
err = not os.path.exists(self.dev)
if err:
logging.error("dev(%s) does not exist" % self.dev)
time.sleep(1)
continue
err = not os.path.exists(self.root)
if err:
logging.error("root(%s) does not exist" % self.root)
time.sleep(1)
continue
err = not os.path.exists(tty_in_path)
if err:
logging.error("tty_in_path(%s) does not exist" % tty_in_path)
time.sleep(1)
continue
err = tty_set_opts(self.dev, TTY_OPTS)
if err:
logging.error("failed stty err(%d)", err)
time.sleep(1)
continue
try:
with open(self.dev, "a", 0) as dev_w, \
open(tty_in_path, "r", 0) as tty_in:
tty_in.seek(0, 2)
while self.keep_running and \
os.fstat(dev_w.fileno()).st_nlink and \
os.fstat(tty_in.fileno()).st_nlink:
ready, _, _ = select.select(
[tty_in.fileno()], [], [], SELECT_TO
)
if not ready:
continue
line = tty_in.readline()
if not line:
continue
logging.debug("dev_w.write")
dev_w.write(line.strip())
logging.debug("dev_w.write -- DONE")
time.sleep(0.1)
logging.debug("dev_w.write CR")
dev_w.write('\r')
logging.debug("dev_w.write CR -- DONE")
            except Exception:
logging.error("error(%s)" % str(sys.exc_info()))
def main(cfg, state):
"""Entry point for wtty-iod"""
logging.critical("Starting...")
for tty in cfg["devices"]:
READERS.append(TTYReader(tty, cfg["roots"]["reader"]))
WRITERS.append(TTYWriter(tty, cfg["roots"]["writer"]))
logging.info("Starting workers")
for worker in READERS + WRITERS:
worker.start()
logging.critical("Working...")
while (state["keep_running"]):
time.sleep(0.1)
logging.info("Stopping")
for i, worker in enumerate(WRITERS + READERS):
logging.debug("Stopping i(%d)" % i)
worker.stop()
logging.info("Joining")
for i, worker in enumerate(WRITERS + READERS):
logging.debug("Joining i(%d)" % i)
worker.join()
logging.critical("Stopped.")
|
safl/wtty
|
wtty/iod.py
|
Python
|
apache-2.0
| 5,729 | 0.002095 |
from __future__ import division, absolute_import, print_function,\
unicode_literals
import os
import sys
try:
from setuptools import setup
except ImportError:
    from distutils.core import setup
from distutils.core import Extension
from distutils.errors import DistutilsError
from distutils.command.build_ext import build_ext
with open(os.path.join('nanomsg','version.py')) as f:
exec(f.read())
class skippable_build_ext(build_ext):
def run(self):
try:
build_ext.run(self)
except Exception as e:
print()
print("=" * 79)
print("WARNING : CPython API extension could not be built.")
print()
print("Exception was : %r" % (e,))
print()
print(
"If you need the extensions (they may be faster than "
"alternative on some"
)
print(" platforms) check you have a compiler configured with all"
" the necessary")
print(" headers and libraries.")
print("=" * 79)
print()
try:
import ctypes
if sys.platform in ('win32', 'cygwin'):
_lib = ctypes.windll.nanoconfig
else:
_lib = ctypes.cdll.LoadLibrary('libnanoconfig.so')
except OSError:
# Building without nanoconfig
cpy_extension = Extension(str('_nanomsg_cpy'),
sources=[str('_nanomsg_cpy/wrapper.c')],
libraries=[str('nanomsg')],
)
else:
# Building with nanoconfig
cpy_extension = Extension(str('_nanomsg_cpy'),
define_macros=[('WITH_NANOCONFIG', '1')],
sources=[str('_nanomsg_cpy/wrapper.c')],
libraries=[str('nanomsg'), str('nanoconfig')],
)
install_requires = []
try:
import importlib
except ImportError:
install_requires.append('importlib')
setup(
name='nanomsg',
version=__version__,
packages=[str('nanomsg'), str('_nanomsg_ctypes'), str('nanomsg_wrappers')],
ext_modules=[cpy_extension],
cmdclass = {'build_ext': skippable_build_ext},
install_requires=install_requires,
description='Python library for nanomsg.',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
],
author='Tony Simpson',
author_email='agjasimpson@gmail.com',
url='https://github.com/tonysimpson/nanomsg-python',
keywords=['nanomsg', 'driver'],
license='MIT',
test_suite="tests",
)
|
romanoved/nanomsg-python
|
setup.py
|
Python
|
mit
| 2,912 | 0.003777 |
# -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######
def init_model( engine ):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure( bind = engine )
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from auth import User, Group, Permission
from logic import *
from sysutil import *
from fileutil import *
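# A hedged wiring sketch (not part of the original module); the SQLite URL is
# only an illustration, as the engine normally comes from the TurboGears
# configuration:
#
# from sqlalchemy import create_engine
# engine = create_engine("sqlite:///devdata.db")
# init_model(engine)
# session = DBSession()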
|
LamCiuLoeng/budget
|
budget/model/__init__.py
|
Python
|
mit
| 2,408 | 0.009551 |
# -*- coding: utf8 -*-
# This file is part of Mnemosyne.
#
# Copyright (C) 2013 Daniel Lombraña González
#
# Mnemosyne is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mnemosyne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Mnemosyne. If not, see <http://www.gnu.org/licenses/>.
"""
Package for creating the Flask application.
This exports:
- create_app a function that creates the Flask application
"""
from flask import Flask
from mnemosyne.frontend import frontend
from mnemosyne.model import db
try:
import mnemosyne.settings as settings
except ImportError:
print "Settings file is missing"
def create_app(db_name=None, testing=False):
"""
Create the Flask app object after configuring it.
Keyword arguments:
db_name -- Database name
testing -- Enable/Disable testing mode
Return value:
app -- Flask application object
"""
try:
app = Flask(__name__)
app.config.from_object(settings)
    except Exception:
print "Settings file is missing, trying with env config..."
app.config.from_envvar('MNEMOSYNE_SETTINGS', silent=False)
if db_name:
app.config['SQLALCHEMY_DATABASE_URI'] = db_name
db.init_app(app)
app.register_blueprint(frontend)
return app
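# A minimal run sketch (not part of the original module); the in-memory SQLite
# URI and the debug flag are assumptions for local experimentation only.
if __name__ == "__main__":  # pragma: no cover
    application = create_app(db_name="sqlite://", testing=True)
    application.run(debug=True)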
|
PyBossa/mnemosyne
|
mnemosyne/core.py
|
Python
|
agpl-3.0
| 1,728 | 0.001159 |
from django.contrib.contenttypes.models import ContentType
from lfs.core.utils import import_symbol
from lfs.criteria.models import Criterion
import logging
logger = logging.getLogger(__name__)
# DEPRECATED 0.8
def is_valid(request, object, product=None):
"""
Returns True if the given object is valid. This is calculated via the
attached criteria.
Passed object is an object which can have criteria. At the moment these are
discounts, shipping/payment methods and shipping/payment prices.
"""
logger.info("Decprecated: lfs.criteria.utils.is_valid: this function is deprecated. Please use the Criteria class instead.")
for criterion_object in get_criteria(object):
criterion_object.request = request
criterion_object.product = product
if criterion_object.is_valid() is False:
return False
return True
# DEPRECATED 0.8
def get_criteria(object):
"""
Returns all criteria for given object.
"""
logger.info("Decprecated: lfs.criteria.utils.get_criteria: this function is deprecated. Please use the Criteria class instead.")
content_type = ContentType.objects.get_for_model(object)
criteria = []
for criterion in Criterion.objects.filter(content_id=object.id, content_type=content_type):
criteria.append(criterion.get_content_object())
return criteria
def get_first_valid(request, objects, product=None):
"""
Returns the first valid object of given objects.
Passed object is an object which can have criteria. At the moment these are
discounts, shipping/payment methods and shipping/payment prices.
"""
for object in objects:
if object.is_valid(request, product):
return object
return None
# DEPRECATED 0.8
def save_criteria(request, object):
"""
Saves the criteria for the given object. The criteria are passed via
request body.
"""
logger.info("Decprecated: lfs.criteria.utils.save_criteria: this function is deprecated. Please use the Criteria class instead.")
# First we delete all existing criteria objects for the given object.
for co in get_criteria(object):
co.delete()
# Then we add all passed criteria to the object.
for key, model in request.POST.items():
if key.startswith("type"):
try:
id = key.split("-")[1]
            except IndexError:
continue
# Get the values for the criterion
operator = request.POST.get("operator-%s" % id)
position = request.POST.get("position-%s" % id)
criterion_class = import_symbol(model)
criterion = criterion_class.objects.create(content=object, operator=operator, position=position)
if criterion.get_value_type() == criterion.MULTIPLE_SELECT:
value = request.POST.getlist("value-%s" % id)
else:
value = request.POST.get("value-%s" % id)
criterion.update(value)
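# A hedged example (not part of django-lfs): select the first method whose
# criteria pass for the current request; "methods" is an assumed iterable of
# objects exposing is_valid(request, product).
#
# method = get_first_valid(request, methods, product=product)
# if method is None:
#     pass  # fall back to a default method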
|
diefenbach/django-lfs
|
lfs/criteria/utils.py
|
Python
|
bsd-3-clause
| 3,002 | 0.001666 |
"""
Load the CCGOIS datasets into a CKAN instance
"""
import dc
import json
import slugify
import ffs
def make_name_from_title(title):
# For some reason, we're finding duplicate names
name = slugify.slugify(title).lower()[:99]
if not name.startswith('ccgois-'):
name = u"ccgois-{}".format(name)
return name
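# Hedged illustration (not part of the original script), assuming a
# python-slugify-style backend:
#
# >>> make_name_from_title(u'CCGOIS - Example Indicator')
# u'ccgois-example-indicator'    # slug already carries the prefix, nothing added
# >>> make_name_from_title(u'Example Indicator')
# u'ccgois-example-indicator'    # prefix added because the slug lacked it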
def load_ccgois(datasets):
for metadata in datasets:
resources = [
dict(
description=r['description'],
name=r['name'],
format=r['filetype'],
url=r['url']
)
for r in metadata['resources']
]
print [r['name'] for r in metadata['resources']]
metadata['title'] = u'CCGOIS - {}'.format(metadata['title'])
metadata['name'] = make_name_from_title(metadata['title'])
print u'Creating {}'.format(metadata['name'])
dc.Dataset.create_or_update(
name=metadata['name'],
title=metadata['title'],
state='active',
license_id='uk-ogl',
notes=metadata['description'],
origin='https://indicators.ic.nhs.uk/webview/',
tags=dc.tags(*metadata['keyword(s)']),
resources=resources,
#frequency=[metadata['frequency'], ],
owner_org='hscic',
extras=[
dict(key='frequency', value=metadata.get('frequency', '')),
dict(key='coverage_start_date', value=metadata['coverage_start_date']),
dict(key='coverage_end_date', value=metadata['coverage_end_date']),
dict(key='domain', value=metadata['domain']),
dict(key='origin', value='HSCIC'),
dict(key='next_version_due', value=metadata['next version due']),
dict(key='nhs_OF_indicators', value=metadata['nhs_of_indicators']),
dict(key='HSCIC_unique_id', value=metadata['unique identifier']),
dict(key='homepage', value=metadata['homepage']),
dict(key='status', value=metadata['status']),
dict(key='language', value=metadata['language']),
dict(key='assurance_level', value=metadata['assurance_level']),
dict(key='release_date', value=metadata['current version uploaded'])
]
)
return
def group_ccgois(datasets):
for metadata in datasets:
dataset_name = make_name_from_title(metadata['title'])
try:
dataset = dc.ckan.action.package_show(id=dataset_name)
        except Exception:
print "Failed to find dataset: {}".format(dataset_name)
print "Can't add to group"
continue
        existing = [g for g in dataset.get('groups', []) if g['name'] == 'ccgois']
        if existing:
            print 'Already in group', existing[0]['name']
else:
dc.ckan.action.member_create(
id='ccgois',
object=dataset_name,
object_type='package',
capacity='member'
)
return
def main(workspace):
DATA_DIR = ffs.Path(workspace)
datasets = json.load(open(DATA_DIR / 'ccgois_indicators.json'))
dc.ensure_publisher('hscic')
dc.ensure_group('ccgois')
load_ccgois(datasets)
group_ccgois(datasets)
|
nhsengland/publish-o-matic
|
datasets/ccgois/load.py
|
Python
|
mit
| 3,287 | 0.003651 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NamespaceCreateOrUpdateParameters(Resource):
"""Parameters supplied to the CreateOrUpdate Namespace operation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param sku: The sku of the created namespace
:type sku: :class:`Sku <azure.mgmt.notificationhubs.models.Sku>`
:param namespace_create_or_update_parameters_name: The name of the
namespace.
:type namespace_create_or_update_parameters_name: str
:param provisioning_state: Provisioning state of the Namespace.
:type provisioning_state: str
    :param region: Specifies the targeted region in which the namespace should
     be created. It can be any of the following values: Australia East,
     Australia Southeast, Central US, East US, East US 2, West US, North
     Central US, South Central US, East Asia, Southeast Asia, Brazil South,
     Japan East, Japan West, North Europe, West Europe
:type region: str
    :param status: Status of the namespace. It can be any of these values:
     1 = Created/Active, 2 = Creating, 3 = Suspended, 4 = Deleting
:type status: str
:param created_at: The time the namespace was created.
:type created_at: datetime
:param service_bus_endpoint: Endpoint you can use to perform
NotificationHub operations.
:type service_bus_endpoint: str
:param subscription_id: The Id of the Azure subscription associated with
the namespace.
:type subscription_id: str
:param scale_unit: ScaleUnit where the namespace gets created
:type scale_unit: str
:param enabled: Whether or not the namespace is currently enabled.
:type enabled: bool
:param critical: Whether or not the namespace is set as Critical.
:type critical: bool
:param namespace_type: The namespace type. Possible values include:
'Messaging', 'NotificationHub'
:type namespace_type: str or :class:`NamespaceType
<azure.mgmt.notificationhubs.models.NamespaceType>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'namespace_create_or_update_parameters_name': {'key': 'properties.name', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'region': {'key': 'properties.region', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'scale_unit': {'key': 'properties.scaleUnit', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'critical': {'key': 'properties.critical', 'type': 'bool'},
'namespace_type': {'key': 'properties.namespaceType', 'type': 'NamespaceType'},
}
def __init__(self, location, tags=None, sku=None, namespace_create_or_update_parameters_name=None, provisioning_state=None, region=None, status=None, created_at=None, service_bus_endpoint=None, subscription_id=None, scale_unit=None, enabled=None, critical=None, namespace_type=None):
super(NamespaceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, sku=sku)
self.namespace_create_or_update_parameters_name = namespace_create_or_update_parameters_name
self.provisioning_state = provisioning_state
self.region = region
self.status = status
self.created_at = created_at
self.service_bus_endpoint = service_bus_endpoint
self.subscription_id = subscription_id
self.scale_unit = scale_unit
self.enabled = enabled
self.critical = critical
self.namespace_type = namespace_type
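# A hedged construction sketch (not part of the generated model); only
# "location" is required by the validation map above, and the values shown are
# assumptions for illustration:
#
# parameters = NamespaceCreateOrUpdateParameters(
#     location="East US 2",
#     namespace_create_or_update_parameters_name="my-namespace",
#     namespace_type="NotificationHub",
# )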
|
SUSE/azure-sdk-for-python
|
azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/namespace_create_or_update_parameters.py
|
Python
|
mit
| 4,958 | 0.001412 |
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
def uniform(min_val, max_val, point_count):
grid = numpy.array(range(point_count)) + 0.5
grid *= (max_val - min_val) / float(point_count)
grid += min_val
return grid
def center_heavy(min_val, max_val, point_count):
grid = uniform(-1, 1, point_count)
grid = numpy.arcsin(grid) / numpy.pi + 0.5
grid *= max_val - min_val
grid += min_val
return grid
def left_heavy(min_val, max_val, point_count):
grid = uniform(0, 1, point_count)
grid = grid ** 2
grid *= max_val - min_val
grid += min_val
return grid
def right_heavy(min_val, max_val, point_count):
grid = left_heavy(max_val, min_val, point_count)
return grid[::-1].copy()
def pitman_yor(
min_alpha=0.1,
max_alpha=100,
min_d=0,
max_d=0.5,
alpha_count=20,
d_count=10):
'''
For d = 0, this degenerates to the CRP, where the expected number of
tables is:
E[table_count] = O(alpha log(customer_count))
'''
min_alpha = float(min_alpha)
max_alpha = float(max_alpha)
min_d = float(min_d)
max_d = float(max_d)
lower_triangle = [
(x, y)
for x in center_heavy(0, 1, alpha_count)
for y in left_heavy(0, 1, d_count)
if x + y < 1
]
alpha = lambda x: min_alpha * (max_alpha / min_alpha) ** x
d = lambda y: min_d + (max_d - min_d) * y
grid = [
{'alpha': alpha(x), 'd': d(y)}
for (x, y) in lower_triangle
]
return grid
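# A hedged usage sketch (not part of the original module): build the default
# (alpha, d) grid and peek at its endpoints; the printed fields are only for
# illustration.
if __name__ == "__main__":
    grid = pitman_yor()
    print(len(grid), grid[0], grid[-1])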
|
fritzo/loom
|
loom/gridding.py
|
Python
|
bsd-3-clause
| 3,080 | 0.000649 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix shell bundle.
Provides the basic command parsing and execution support to make a Pelix shell.
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Shell constants
from . import SERVICE_SHELL, SERVICE_SHELL_COMMAND, \
SERVICE_SHELL_UTILS
import pelix.shell.beans as beans
# Pelix modules
from pelix.utilities import to_str, to_bytes
import pelix.constants as constants
import pelix.framework as pelix
# Standard library
import collections
import inspect
import linecache
import logging
import os
import shlex
import string
import sys
import traceback
import threading
# ------------------------------------------------------------------------------
DEFAULT_NAMESPACE = "default"
""" Default command name space: default """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
def _find_assignment(arg_token):
"""
Find the first non-escaped assignment in the given argument token.
Returns -1 if no assignment was found.
:param arg_token: The argument token
:return: The index of the first assignment, or -1
"""
idx = arg_token.find('=')
while idx != -1:
if idx != 0:
if arg_token[idx - 1] != '\\':
# No escape character
return idx
idx = arg_token.find('=', idx + 1)
# No assignment found
return -1
class _ArgTemplate(string.Template):
"""
Argument string template class
"""
idpattern = r'[_a-z\?][_a-z0-9\.]*'
def _make_args(args_list, session, fw_props):
"""
Converts the given list of arguments into a list (args) and a
dictionary (kwargs).
All arguments with an assignment are put into kwargs, others in args.
:param args_list: The list of arguments to be treated
:param session: The current shell session
:return: The (arg_token, kwargs) tuple.
"""
args = []
kwargs = {}
for arg_token in args_list:
idx = _find_assignment(arg_token)
if idx != -1:
# Assignment
key = arg_token[:idx]
value = arg_token[idx + 1:]
kwargs[key] = value
else:
# Direct argument
args.append(arg_token)
# Prepare the dictionary of variables
variables = collections.defaultdict(str)
variables.update(fw_props)
variables.update(session.variables)
# Replace variables
args = [_ArgTemplate(arg).safe_substitute(variables) for arg in args]
kwargs = dict((key, _ArgTemplate(value).safe_substitute(variables))
for key, value in kwargs.items())
return args, kwargs
def _split_ns_command(cmd_token):
"""
Extracts the name space and the command name of the given command token.
:param cmd_token: The command token
:return: The extracted (name space, command) tuple
"""
namespace = None
cmd_split = cmd_token.split('.', 1)
if len(cmd_split) == 1:
# No name space given
command = cmd_split[0]
else:
# Got a name space and a command
namespace = cmd_split[0]
command = cmd_split[1]
if not namespace:
# No name space given: given an empty one
namespace = ""
# Use lower case values only
return namespace.lower(), command.lower()
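# Hedged illustration (not part of the original bundle) of the splitting rule:
#
# >>> _split_ns_command("bd")
# ('', 'bd')
# >>> _split_ns_command("shell.bl")
# ('shell', 'bl')
# >>> _split_ns_command(".HELP")
# ('', 'help')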
# ------------------------------------------------------------------------------
class ShellUtils(object):
"""
Utility methods for the shell
"""
def bundlestate_to_str(self, state):
"""
Converts a bundle state integer to a string
"""
states = {
pelix.Bundle.INSTALLED: "INSTALLED",
pelix.Bundle.ACTIVE: "ACTIVE",
pelix.Bundle.RESOLVED: "RESOLVED",
pelix.Bundle.STARTING: "STARTING",
pelix.Bundle.STOPPING: "STOPPING",
pelix.Bundle.UNINSTALLED: "UNINSTALLED"
}
return states.get(state, "Unknown state ({0})".format(state))
def make_table(self, headers, lines, prefix=None):
"""
Generates an ASCII table according to the given headers and lines
:param headers: List of table headers (N-tuple)
:param lines: List of table lines (N-tuples)
:param prefix: Optional prefix for each line
:return: The ASCII representation of the table
:raise ValueError: Different number of columns between headers and
lines
"""
# Normalize the prefix
prefix = str(prefix or "")
# Maximum lengths
lengths = [len(title) for title in headers]
# Store the number of columns (0-based)
nb_columns = len(lengths) - 1
# Lines
str_lines = []
for idx, line in enumerate(lines):
# Recompute lengths
str_line = []
str_lines.append(str_line)
column = -1
try:
for column, entry in enumerate(line):
str_entry = str(entry)
str_line.append(str_entry)
if len(str_entry) > lengths[column]:
lengths[column] = len(str_entry)
except IndexError:
# Line too small/big
raise ValueError("Different sizes for header and lines "
"(line {0})".format(idx + 1))
except (TypeError, AttributeError):
# Invalid type of line
raise ValueError("Invalid type of line: %s",
type(line).__name__)
else:
if column != nb_columns:
# Check if all lines have the same number of columns
raise ValueError("Different sizes for header and lines "
"(line {0})".format(idx + 1))
# Prepare the head (centered text)
format_str = "{0}|".format(prefix)
for column, length in enumerate(lengths):
format_str += " {%d:^%d} |" % (column, length)
head_str = format_str.format(*headers)
# Prepare the separator, according the length of the headers string
separator = '{0}{1}'.format(prefix,
'-' * (len(head_str) - len(prefix)))
idx = head_str.find('|')
while idx != -1:
separator = '+'.join((separator[:idx], separator[idx + 1:]))
idx = head_str.find('|', idx + 1)
# Prepare the output
output = [separator,
head_str,
separator.replace('-', '=')]
# Compute the lines
format_str = format_str.replace('^', '<')
for line in str_lines:
output.append(format_str.format(*line))
output.append(separator)
# Force the last end of line
output.append("")
# Join'em
return '\n'.join(output)
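# A hedged usage sketch (not part of the original bundle): the header and row
# values below are made up, purely to show the expected shapes.
#
# utils = ShellUtils()
# print(utils.make_table(('ID', 'Name'), [(0, 'framework'), (1, 'shell')]))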
# ------------------------------------------------------------------------------
class Shell(object):
"""
A simple shell, based on shlex.
Allows to use name spaces.
"""
def __init__(self, context, utilities):
"""
Sets up the shell
:param context: The bundle context
"""
self._commands = {}
self._context = context
self._framework = context.get_bundle(0)
self._utils = utilities
# Bound services: reference -> service
self._bound_references = {}
# Service reference -> (name space, [commands])
self._reference_commands = {}
# Register basic commands
self.register_command(None, "bd", self.bundle_details)
self.register_command(None, "bl", self.bundles_list)
self.register_command(None, "sd", self.service_details)
self.register_command(None, "sl", self.services_list)
self.register_command(None, "set", self.var_set)
self.register_command(None, "unset", self.var_unset)
self.register_command(None, "run", self.run_file)
self.register_command(None, "start", self.start)
self.register_command(None, "stop", self.stop)
self.register_command(None, "update", self.update)
self.register_command(None, "install", self.install)
self.register_command(None, "uninstall", self.uninstall)
self.register_command(None, "properties", self.properties_list)
self.register_command(None, "property", self.property_value)
self.register_command(None, "sysprops", self.environment_list)
self.register_command(None, "sysprop", self.environment_value)
self.register_command(None, "threads", self.threads_list)
self.register_command(None, "thread", self.thread_details)
self.register_command(None, "echo", self.echo)
self.register_command(None, "loglevel", self.log_level)
self.register_command(None, "help", self.print_help)
self.register_command(None, "?", self.print_help)
self.register_command(None, "quit", self.quit)
self.register_command(None, "close", self.quit)
self.register_command(None, "exit", self.quit)
def _bind_handler(self, svc_ref):
"""
Called if a command service has been found.
Registers the methods of this service.
:param svc_ref: A reference to the found service
:return: True if the commands have been registered
"""
if svc_ref in self._bound_references:
# Already bound service
return False
# Get the service
handler = self._context.get_service(svc_ref)
# Get its name space
namespace = handler.get_namespace()
commands = []
# Register all service methods directly
for command, method in handler.get_methods():
self.register_command(namespace, command, method)
commands.append(command)
# Store the reference
self._bound_references[svc_ref] = handler
self._reference_commands[svc_ref] = (namespace, commands)
return True
def _unbind_handler(self, svc_ref):
"""
Called if a command service is gone.
Unregisters its commands.
:param svc_ref: A reference to the unbound service
:return: True if the commands have been unregistered
"""
if svc_ref not in self._bound_references:
# Unknown reference
return False
# Unregister its commands
namespace, commands = self._reference_commands[svc_ref]
for command in commands:
self.unregister(namespace, command)
# Release the service
self._context.unget_service(svc_ref)
del self._bound_references[svc_ref]
del self._reference_commands[svc_ref]
return True
def register_command(self, namespace, command, method):
"""
Registers the given command to the shell.
The namespace can be None, empty or "default"
:param namespace: The command name space.
:param command: The shell name of the command
:param method: The method to call
:return: True if the method has been registered, False if it was
already known or invalid
"""
if method is None:
_logger.error("No method given for %s.%s", namespace, command)
return False
# Store everything in lower case
namespace = (namespace or "").strip().lower()
command = (command or "").strip().lower()
if not namespace:
namespace = DEFAULT_NAMESPACE
if not command:
_logger.error("No command name given")
return False
if namespace not in self._commands:
space = self._commands[namespace] = {}
else:
space = self._commands[namespace]
if command in space:
_logger.error("Command already registered: %s.%s", namespace,
command)
return False
space[command] = method
return True
def unregister(self, namespace, command=None):
"""
Unregisters the given command. If command is None, the whole name space
is unregistered.
:param namespace: The command name space.
:param command: The shell name of the command, or None
:return: True if the command was known, else False
"""
if not namespace:
namespace = DEFAULT_NAMESPACE
namespace = namespace.strip().lower()
if namespace not in self._commands:
_logger.warning("Unknown name space: %s", namespace)
return False
if command is not None:
# Remove the command
command = command.strip().lower()
if command not in self._commands[namespace]:
_logger.warning("Unknown command: %s.%s", namespace, command)
return False
del self._commands[namespace][command]
# Remove the name space if necessary
if not self._commands[namespace]:
del self._commands[namespace]
else:
# Remove the whole name space
del self._commands[namespace]
return True
def __find_command_ns(self, command):
"""
        Returns the name spaces where the given command name is registered.
        If the command exists in the default name space, that name space is
        placed first in the returned list.
        Returns an empty list if the command is unknown
:param command: A command name
:return: A list of name spaces
"""
# Look for the spaces where the command name appears
namespaces = []
for namespace, commands in self._commands.items():
if command in commands:
namespaces.append(namespace)
# Sort name spaces
namespaces.sort()
# Default name space must always come first
try:
namespaces.remove(DEFAULT_NAMESPACE)
namespaces.insert(0, DEFAULT_NAMESPACE)
except ValueError:
# Default name space wasn't present
pass
return namespaces
def get_ns_commands(self, cmd_name):
"""
Retrieves the possible name spaces and commands associated to the given
command name.
:param cmd_name: The given command name
:return: A list of 2-tuples (name space, command)
:raise ValueError: Unknown command name
"""
namespace, command = _split_ns_command(cmd_name)
if not namespace:
# Name space not given, look for the commands
spaces = self.__find_command_ns(command)
if not spaces:
# Unknown command
raise ValueError("Unknown command {0}".format(command))
else:
# Return a sorted list of tuples
return sorted((namespace, command) for namespace in spaces)
# Single match
return [(namespace, command)]
def get_ns_command(self, cmd_name):
"""
Retrieves the name space and the command associated to the given
command name.
:param cmd_name: The given command name
:return: A 2-tuple (name space, command)
:raise ValueError: Unknown command name
"""
namespace, command = _split_ns_command(cmd_name)
if not namespace:
# Name space not given, look for the command
spaces = self.__find_command_ns(command)
if not spaces:
# Unknown command
raise ValueError("Unknown command {0}".format(command))
elif len(spaces) > 1:
# Multiple possibilities
if spaces[0] == DEFAULT_NAMESPACE:
# Default name space has priority
namespace = DEFAULT_NAMESPACE
else:
# Ambiguous name
raise ValueError("Multiple name spaces for {0}: {1}"
.format(command, ', '.join(spaces)))
else:
# Use the found name space
namespace = spaces[0]
# Command found
return namespace, command
def execute(self, cmdline, session=None):
"""
Executes the command corresponding to the given line
:param cmdline: Command line to parse
:param session: Current shell session
:return: True if command succeeded, else False
"""
if session is None:
# Default session
session = beans.ShellSession(
beans.IOHandler(sys.stdin, sys.stdout), {})
assert isinstance(session, beans.ShellSession)
# Split the command line
if not cmdline:
return False
# Convert the line into a string
cmdline = to_str(cmdline)
try:
line_split = shlex.split(cmdline, True, True)
except ValueError as ex:
session.write_line("Error reading line: {0}", ex)
return False
if not line_split:
return False
try:
# Extract command information
namespace, command = self.get_ns_command(line_split[0])
except ValueError as ex:
# Unknown command
session.write_line(str(ex))
return False
# Get the content of the name space
space = self._commands.get(namespace, None)
if not space:
session.write_line("Unknown name space {0}", namespace)
return False
# Get the method object
method = space.get(command, None)
if method is None:
session.write_line("Unknown command: {0}.{1}", namespace, command)
return False
# Make arguments and keyword arguments
args, kwargs = _make_args(line_split[1:], session,
self._framework.get_properties())
try:
# Execute it
result = method(session, *args, **kwargs)
# Store the result as $?
if result is not None:
session.set(beans.RESULT_VAR_NAME, result)
# 0, None are considered as success, so don't use not nor bool
return result is not False
except TypeError as ex:
# Invalid arguments...
_logger.error("Error calling %s.%s: %s", namespace, command, ex)
session.write_line("Invalid method call: {0}", ex)
self.__print_namespace_help(session, namespace, command)
return False
except Exception as ex:
# Error
_logger.exception("Error calling %s.%s: %s",
namespace, command, ex)
session.write_line("{0}: {1}", type(ex).__name__, str(ex))
return False
finally:
# Try to flush in any case
try:
session.flush()
except IOError:
pass
def get_banner(self):
"""
Returns the Shell banner
"""
return "** Pelix Shell prompt **\n"
def get_ps1(self):
"""
Returns the PS1, the basic shell prompt
"""
return "$ "
def get_namespaces(self):
"""
Retrieves the list of known name spaces (without the default one)
:return: The list of known name spaces
"""
namespaces = list(self._commands.keys())
namespaces.remove(DEFAULT_NAMESPACE)
namespaces.sort()
return namespaces
def get_commands(self, namespace):
"""
Retrieves the commands of the given name space. If *namespace* is None
or empty, it retrieves the commands of the default name space
:param namespace: The commands name space
:return: A list of commands names
"""
if not namespace:
# Default name space:
namespace = DEFAULT_NAMESPACE
try:
            namespace = namespace.strip().lower()
commands = list(self._commands[namespace].keys())
commands.sort()
return commands
except KeyError:
# Unknown name space
return []
def echo(self, io_handler, *words):
"""
Echoes the given words
"""
io_handler.write_line(' '.join(words))
def var_set(self, session, **kwargs):
"""
Sets the given variables or prints the current ones. "set answer=42"
"""
if not kwargs:
session.write_line(
self._utils.make_table(('Name', 'Value'),
session.variables.items()))
else:
for name, value in kwargs.items():
name = name.strip()
session.set(name, value)
session.write_line("{0}={1}", name, value)
def var_unset(self, session, name):
"""
Unsets the given variable
"""
name = name.strip()
try:
session.unset(name)
except KeyError:
session.write_line("Unknown variable: {0}", name)
return False
else:
session.write_line("Variable {0} unset.", name)
def run_file(self, session, filename):
"""
Runs the given "script" file
"""
try:
with open(filename, "r") as filep:
for lineno, line in enumerate(filep):
line = line.strip()
if not line or line.startswith("#"):
# Ignore comments and empty lines
continue
# Print out the executed line
session.write_line("[{0:02d}] >> {1}", lineno, line)
# Execute the line
if not self.execute(line, session):
session.write_line(
"Command at line {0} failed. Abandon.", lineno + 1)
return False
session.write_line("Script execution succeeded")
except IOError as ex:
session.write_line("Error reading file {0}: {1}", filename, ex)
return False
def bundle_details(self, io_handler, bundle_id):
"""
Prints the details of the bundle with the given ID or name
"""
bundle = None
try:
# Convert the given ID into an integer
bundle_id = int(bundle_id)
except ValueError:
# Not an integer, suppose it's a bundle name
for bundle in self._context.get_bundles():
if bundle.get_symbolic_name() == bundle_id:
break
else:
# Bundle not found
bundle = None
else:
# Integer ID: direct access
try:
bundle = self._context.get_bundle(bundle_id)
except constants.BundleException:
pass
if bundle is None:
# No matching bundle
io_handler.write_line("Unknown bundle ID: {0}", bundle_id)
return False
lines = ["ID......: {0}".format(bundle.get_bundle_id()),
"Name....: {0}".format(bundle.get_symbolic_name()),
"Version.: {0}".format(bundle.get_version()),
"State...: {0}".format(
self._utils.bundlestate_to_str(bundle.get_state())),
"Location: {0}".format(bundle.get_location()),
"Published services:"]
try:
services = bundle.get_registered_services()
if services:
for svc_ref in services:
lines.append("\t{0}".format(svc_ref))
else:
lines.append("\tn/a")
except constants.BundleException as ex:
            # Bundle in an invalid state
lines.append("\tError: {0}".format(ex))
lines.append("Services used by this bundle:")
try:
services = bundle.get_services_in_use()
if services:
for svc_ref in services:
lines.append("\t{0}".format(svc_ref))
else:
lines.append("\tn/a")
except constants.BundleException as ex:
            # Bundle in an invalid state
lines.append("\tError: {0}".format(ex))
lines.append("")
io_handler.write('\n'.join(lines))
def bundles_list(self, io_handler, name=None):
"""
Lists the bundles in the framework and their state. Possibility to
filter on the bundle name.
"""
# Head of the table
headers = ('ID', 'Name', 'State', 'Version')
# Get the bundles
bundles = self._context.get_bundles()
# The framework is not in the result of get_bundles()
bundles.insert(0, self._context.get_bundle(0))
if name is not None:
# Filter the list
bundles = [bundle for bundle in bundles
if name in bundle.get_symbolic_name()]
# Make the entries
lines = [[str(entry)
for entry in (bundle.get_bundle_id(),
bundle.get_symbolic_name(),
self._utils.bundlestate_to_str(
bundle.get_state()),
bundle.get_version())]
for bundle in bundles]
# Print'em all
io_handler.write(self._utils.make_table(headers, lines))
if name is None:
io_handler.write_line("{0} bundles installed", len(lines))
else:
io_handler.write_line("{0} filtered bundles", len(lines))
def service_details(self, io_handler, service_id):
"""
Prints the details of the service with the given ID
"""
svc_ref = self._context.get_service_reference(
None, '({0}={1})'.format(constants.SERVICE_ID, service_id))
if svc_ref is None:
io_handler.write_line('Service not found: {0}', service_id)
return False
lines = [
"ID............: {0}".format(
svc_ref.get_property(constants.SERVICE_ID)),
"Rank..........: {0}".format(
svc_ref.get_property(constants.SERVICE_RANKING)),
"Specifications: {0}".format(
svc_ref.get_property(constants.OBJECTCLASS)),
"Bundle........: {0}".format(svc_ref.get_bundle()),
"Properties....:"]
for key, value in sorted(svc_ref.get_properties().items()):
lines.append("\t{0} = {1}".format(key, value))
lines.append("Bundles using this service:")
for bundle in svc_ref.get_using_bundles():
lines.append("\t{0}".format(bundle))
lines.append("")
io_handler.write('\n'.join(lines))
def services_list(self, io_handler, specification=None):
"""
Lists the services in the framework. Possibility to filter on an exact
specification.
"""
# Head of the table
headers = ('ID', 'Specifications', 'Bundle', 'Ranking')
# Lines
references = self._context.get_all_service_references(
specification, None) or []
# Construct the list of services
lines = [[str(entry)
for entry in (ref.get_property(constants.SERVICE_ID),
ref.get_property(constants.OBJECTCLASS),
ref.get_bundle(),
ref.get_property(constants.SERVICE_RANKING))]
for ref in references]
if not lines and specification:
# No matching service found
io_handler.write_line("No service provides '{0}'", specification)
return False
else:
# Print'em all
io_handler.write(self._utils.make_table(headers, lines))
io_handler.write_line("{0} services registered", len(lines))
def __extract_help(self, method):
"""
Formats the help string for the given method
:param method: The method to document
:return: A tuple: (arguments list, documentation line)
"""
if method is None:
return "(No associated method)"
# Get the arguments
argspec = inspect.getargspec(method)
# Compute the number of arguments with default value
if argspec.defaults is not None:
nb_optional = len(argspec.defaults)
# Let the mandatory arguments as they are
args = ["<{0}>".format(arg)
for arg in argspec.args[2:-nb_optional]]
# Add the other arguments
for name, value in zip(argspec.args[-nb_optional:],
argspec.defaults[-nb_optional:]):
if value is not None:
args.append('[<{0}>={1}]'.format(name, value))
else:
args.append('[<{0}>]'.format(name))
else:
# All arguments are mandatory
args = ["<{0}>".format(arg) for arg in argspec.args[2:]]
# Extra arguments
if argspec.keywords:
args.append('[<property=value> ...]')
if argspec.varargs:
args.append("...")
# Get the documentation string
doc = inspect.getdoc(method) or "(Documentation missing)"
return ' '.join(args), ' '.join(doc.split())
def __print_command_help(self, io_handler, namespace, cmd_name):
"""
Prints the documentation of the given command
:param io_handler: I/O handler
:param namespace: Name space of the command
:param cmd_name: Name of the command
"""
# Extract documentation
args, doc = self.__extract_help(self._commands[namespace][cmd_name])
# Print the command name, and its arguments
if args:
io_handler.write_line("- {0} {1}", cmd_name, args)
else:
io_handler.write_line("- {0}", cmd_name)
# Print the documentation line
io_handler.write_line("\t\t{0}", doc)
def __print_namespace_help(self, io_handler, namespace, cmd_name=None):
"""
Prints the documentation of all the commands in the given name space,
or only of the given command
:param io_handler: I/O Handler
:param namespace: Name space of the command
:param cmd_name: Name of the command to show, None to show them all
"""
io_handler.write_line("=== Name space '{0}' ===", namespace)
# Get all commands in this name space
if cmd_name is None:
names = [command for command in self._commands[namespace]]
names.sort()
else:
names = [cmd_name]
first_cmd = True
for command in names:
if not first_cmd:
# Print an empty line
io_handler.write_line('\n')
self.__print_command_help(io_handler, namespace, command)
first_cmd = False
def print_help(self, io_handler, command=None):
"""
Prints the available methods and their documentation, or the
documentation of the given command.
"""
if command:
# Single command mode
if command in self._commands:
# Argument is a name space
self.__print_namespace_help(io_handler, command)
was_namespace = True
else:
was_namespace = False
# Also print the name of matching commands
try:
# Extract command name space and name
possibilities = self.get_ns_commands(command)
except ValueError as ex:
# Unknown command
if not was_namespace:
# ... and no name space were matching either -> error
io_handler.write_line(str(ex))
return False
else:
# Print the help of the found command
if was_namespace:
# Give some space
io_handler.write_line('\n\n')
for namespace, cmd_name in possibilities:
self.__print_namespace_help(io_handler, namespace,
cmd_name)
else:
# Get all name spaces
namespaces = list(self._commands.keys())
namespaces.remove(DEFAULT_NAMESPACE)
namespaces.sort()
namespaces.insert(0, DEFAULT_NAMESPACE)
first_ns = True
for namespace in namespaces:
if not first_ns:
# Add empty lines
io_handler.write_line('\n\n')
# Print the help of all commands
self.__print_namespace_help(io_handler, namespace)
first_ns = False
def properties_list(self, io_handler):
"""
Lists the properties of the framework
"""
# Get the framework
framework = self._context.get_bundle(0)
# Head of the table
headers = ('Property Name', 'Value')
# Lines
lines = [item for item in framework.get_properties().items()]
# Sort lines
lines.sort()
# Print the table
io_handler.write(self._utils.make_table(headers, lines))
def property_value(self, io_handler, name):
"""
Prints the value of the given property, looking into
framework properties then environment variables.
"""
value = self._context.get_property(name)
if value is None:
# Avoid printing "None"
value = ""
io_handler.write_line(str(value))
def environment_list(self, io_handler):
"""
Lists the framework process environment variables
"""
# Head of the table
headers = ('Environment Variable', 'Value')
# Lines
lines = [item for item in os.environ.items()]
# Sort lines
lines.sort()
# Print the table
io_handler.write(self._utils.make_table(headers, lines))
def environment_value(self, io_handler, name):
"""
Prints the value of the given environment variable
"""
io_handler.write_line(os.getenv(name))
def threads_list(self, io_handler):
"""
Lists the active threads and their current code line
"""
try:
# Extract frames
frames = sys._current_frames()
except AttributeError:
io_handler.write_line("sys._current_frames() is not available.")
return
# Get the thread ID -> Thread mapping
names = threading._active.copy()
# Sort by thread ID
thread_ids = list(frames.keys())
thread_ids.sort()
lines = []
for thread_id in thread_ids:
# Get the corresponding stack
stack = frames[thread_id]
# Try to get the thread name
try:
name = names[thread_id].name
except KeyError:
name = "<unknown>"
# Construct the code position
lines.append('Thread ID: {0} - Name: {1}'.format(thread_id, name))
lines.append('Line:')
lines.extend(line.rstrip()
for line in traceback.format_stack(stack, 1))
lines.append('')
lines.append('')
# Sort the lines
io_handler.write('\n'.join(lines))
def thread_details(self, io_handler, thread_id):
"""
Prints details about the thread with the given ID (not its name)
"""
try:
# Get the stack
thread_id = int(thread_id)
stack = sys._current_frames()[thread_id]
except KeyError:
io_handler.write_line("Unknown thread ID: {0}", thread_id)
except ValueError:
io_handler.write_line("Invalid thread ID: {0}", thread_id)
except AttributeError:
io_handler.write_line("sys._current_frames() is not available.")
else:
# Get the name
try:
name = threading._active[thread_id].name
except KeyError:
name = "<unknown>"
lines = ['Thread ID: {0} - Name: {1}'.format(thread_id, name),
'Stack trace:']
trace_lines = []
frame = stack
while frame is not None:
# Store the line information
trace_lines.append(self.__format_frame_info(frame))
# Previous frame...
frame = frame.f_back
# Reverse the lines
trace_lines.reverse()
# Add them to the printed lines
lines.extend(trace_lines)
lines.append('')
io_handler.write('\n'.join(lines))
def __format_frame_info(self, frame):
"""
Formats the given stack frame to show its position in the code and
part of its context
:param frame: A stack frame
"""
# Same as in traceback.extract_stack
lineno = frame.f_lineno
code = frame.f_code
filename = code.co_filename
method_name = code.co_name
linecache.checkcache(filename)
output_lines = []
try:
# Try to get the type of the calling object
instance = frame.f_locals['self']
method_name = '{0}::{1}' \
.format(type(instance).__name__, method_name)
except KeyError:
# Not called from a bound method
pass
# File & line
output_lines.append(' File "{0}", line {1}, in {2}'
.format(filename, lineno, method_name))
# Arguments
arginfo = inspect.getargvalues(frame)
for name in arginfo.args:
output_lines.append(' - {0:s} = {1}'
.format(name, repr(frame.f_locals[name])))
if arginfo.varargs:
output_lines.append(' - *{0:s} = {1}'
.format(arginfo.varargs,
frame.f_locals[arginfo.varargs]))
if arginfo.keywords:
output_lines.append(' - **{0:s} = {1}'
.format(arginfo.keywords,
frame.f_locals[arginfo.keywords]))
# Line block
lines = self.__extract_lines(filename, frame.f_globals, lineno, 3)
if lines:
output_lines.append('')
prefix = ' '
output_lines.append('{0}{1}'
.format(prefix,
'\n{0}'.format(prefix).join(lines)))
return '\n'.join(output_lines)
def __extract_lines(self, filename, f_globals, lineno, around):
"""
Extracts a block of lines from the given file
:param filename: Name of the source file
:param f_globals: Globals of the frame of the current code
:param lineno: Current line of code
:param around: Number of line to print before and after the current one
"""
current_line = linecache.getline(filename, lineno, f_globals)
if not current_line:
# No data on this line
return ''
lines = []
# Add some lines before
for pre_lineno in range(lineno - around, lineno):
pre_line = linecache.getline(filename, pre_lineno, f_globals)
lines.append('{0}'.format(pre_line.rstrip()))
# The line itself
lines.append('{0}'.format(current_line.rstrip()))
# Add some lines after
for pre_lineno in range(lineno + 1, lineno + around + 1):
pre_line = linecache.getline(filename, pre_lineno, f_globals)
lines.append('{0}'.format(pre_line.rstrip()))
# Smart left strip
minimal_tab = None
for line in lines:
if line.strip():
tab = len(line) - len(line.lstrip())
if minimal_tab is None or tab < minimal_tab:
minimal_tab = tab
        if minimal_tab is not None and minimal_tab > 0:
lines = [line[minimal_tab:] for line in lines]
# Add some place for a marker
marked_line = '>> {0}'.format(lines[around])
lines = [' {0}'.format(line) for line in lines]
lines[around] = marked_line
lines.append('')
# Return the lines
return lines
def log_level(self, io_handler, level=None, name=None):
"""
Prints/Changes log level
"""
# Get the logger
logger = logging.getLogger(name)
# Normalize the name
if not name:
name = "Root"
if not level:
# Level not given: print the logger level
io_handler.write_line(
"{0} log level: {1} (real: {2})",
name,
logging.getLevelName(logger.getEffectiveLevel()),
logging.getLevelName(logger.level))
else:
# Set the logger level
try:
logger.setLevel(level.upper())
io_handler.write_line("New level for {0}: {1}", name, level)
except ValueError:
io_handler.write_line("Invalid log level: {0}", level)
def quit(self, io_handler):
"""
Stops the current shell session (raises a KeyboardInterrupt exception)
"""
io_handler.write_line("Raising KeyboardInterrupt to stop main thread")
raise KeyboardInterrupt()
def __get_bundle(self, io_handler, bundle_id):
"""
Retrieves the Bundle object with the given bundle ID. Writes errors
through the I/O handler if any.
:param io_handler: I/O Handler
:param bundle_id: String or integer bundle ID
:return: The Bundle object matching the given ID, None if not found
"""
try:
bundle_id = int(bundle_id)
return self._context.get_bundle(bundle_id)
except (TypeError, ValueError):
io_handler.write_line("Invalid bundle ID: {0}", bundle_id)
except constants.BundleException:
io_handler.write_line("Unknown bundle: {0}", bundle_id)
def start(self, io_handler, bundle_id, *bundles_ids):
"""
Starts the bundles with the given IDs. Stops on first failure.
"""
for bid in (bundle_id,) + bundles_ids:
bundle = self.__get_bundle(io_handler, bid)
if bundle is not None:
io_handler.write_line("Starting bundle {0}...", bid)
bundle.start()
else:
return False
def stop(self, io_handler, bundle_id, *bundles_ids):
"""
Stops the bundles with the given IDs. Stops on first failure.
"""
for bid in (bundle_id,) + bundles_ids:
bundle = self.__get_bundle(io_handler, bid)
if bundle is not None:
io_handler.write_line("Stopping bundle {0}...", bid)
bundle.stop()
else:
return False
def update(self, io_handler, bundle_id, *bundles_ids):
"""
Updates the bundles with the given IDs. Stops on first failure.
"""
for bid in (bundle_id,) + bundles_ids:
bundle = self.__get_bundle(io_handler, bid)
if bundle is not None:
io_handler.write_line("Updating bundle {0}...", bid)
bundle.update()
else:
return False
def install(self, io_handler, module_name):
"""
Installs the bundle with the given module name
"""
bundle = self._context.install_bundle(module_name)
io_handler.write_line("Bundle ID: {0}", bundle.get_bundle_id())
return bundle.get_bundle_id()
def uninstall(self, io_handler, bundle_id, *bundles_ids):
"""
Uninstalls the bundles with the given IDs. Stops on first failure.
"""
for bid in (bundle_id,) + bundles_ids:
bundle = self.__get_bundle(io_handler, bid)
if bundle is not None:
io_handler.write_line("Uninstalling bundle {0}...", bid)
bundle.uninstall()
else:
return False
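# A hedged usage sketch (not part of the original bundle): inside a framework
# the shell is normally consumed as the SERVICE_SHELL service; the direct
# construction below is only meant to illustrate execute() and its default
# stdin/stdout session, and create_framework([]) is an assumption about the
# host application.
#
# framework = pelix.create_framework([])
# framework.start()
# shell = Shell(framework.get_bundle_context(), ShellUtils())
# shell.execute("bl")          # list installed bundles
# shell.execute("echo hello")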
# ------------------------------------------------------------------------------
@constants.BundleActivator
class PelixActivator(object):
"""
Activator class for Pelix
"""
def __init__(self):
"""
Sets up the activator
"""
self._shell = None
self._shell_reg = None
self._utils_reg = None
def service_changed(self, event):
"""
Called when a command provider service event occurred
"""
kind = event.get_kind()
reference = event.get_service_reference()
if kind in (pelix.ServiceEvent.REGISTERED,
pelix.ServiceEvent.MODIFIED):
# New or modified service
self._shell._bind_handler(reference)
else:
# Service gone or not matching anymore
self._shell._unbind_handler(reference)
def start(self, context):
"""
Bundle starting
:param context: The bundle context
"""
try:
# Prepare the shell utility service
utils = ShellUtils()
self._shell = Shell(context, utils)
self._shell_reg = context.register_service(
SERVICE_SHELL, self._shell, {})
self._utils_reg = context.register_service(
SERVICE_SHELL_UTILS, utils, {})
# Register the service listener
context.add_service_listener(self, None, SERVICE_SHELL_COMMAND)
# Register existing command services
refs = context.get_all_service_references(SERVICE_SHELL_COMMAND)
if refs is not None:
for ref in refs:
self._shell._bind_handler(ref)
_logger.info("Shell services registered")
except constants.BundleException as ex:
_logger.exception("Error registering the shell service: %s", ex)
def stop(self, context):
"""
Bundle stopping
:param context: The bundle context
"""
# Unregister the service listener
context.remove_service_listener(self)
# Unregister the services
if self._shell_reg is not None:
self._shell_reg.unregister()
self._shell_reg = None
if self._utils_reg is not None:
self._utils_reg.unregister()
self._utils_reg = None
self._shell = None
_logger.info("Shell services unregistered")
|
isandlaTech/cohorte-demos
|
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/pelix/shell/core.py
|
Python
|
apache-2.0
| 48,234 | 0 |
from .depth import *
from .camera import *
from .contact import *
from .imagefeature import *
from .arduino import *
|
poppy-project/pypot
|
pypot/sensor/__init__.py
|
Python
|
gpl-3.0
| 117 | 0 |
# -*- coding: utf-8 -*-
"""
# This is authentication backend for Django middleware.
# In settings.py you need to set:
MIDDLEWARE_CLASSES = (
...
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
...
)
AUTHENTICATION_BACKENDS = (
    'kobo.django.auth.krb5.Krb5RemoteUserBackend',
)
# Add login and logout adresses to urls.py:
urlpatterns = patterns("",
...
url(r'^auth/krb5login/$',
        django.views.generic.TemplateView.as_view(template_name='auth/krb5login.html')),
url(r'^auth/logout/$', 'django.contrib.auth.views.logout', kwargs={"next_page": "/"}),
...
)
# Set a httpd config to protect krb5login page with kerberos.
# You need to have mod_auth_kerb installed to use kerberos auth.
# Httpd config /etc/httpd/conf.d/<project>.conf should look like this:
<Location "/">
SetHandler python-program
PythonHandler django.core.handlers.modpython
SetEnv DJANGO_SETTINGS_MODULE <project>.settings
PythonDebug On
</Location>
<Location "/auth/krb5login">
AuthType Kerberos
AuthName "<project> Kerberos Authentication"
KrbMethodNegotiate on
KrbMethodK5Passwd off
KrbServiceName HTTP
KrbAuthRealms EXAMPLE.COM
Krb5Keytab /etc/httpd/conf/http.<hostname>.keytab
KrbSaveCredentials off
Require valid-user
</Location>
"""
from django.contrib.auth.backends import RemoteUserBackend
class Krb5RemoteUserBackend(RemoteUserBackend):
def clean_username(self, username):
# remove @REALM from username
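        # e.g. "alice@EXAMPLE.COM" -> "alice"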
return username.split("@")[0]
|
pombredanne/https-git.fedorahosted.org-git-kobo
|
kobo/django/auth/krb5.py
|
Python
|
lgpl-2.1
| 1,587 | 0.00189 |
from setuptools import setup, find_packages
setup(
name = 'gooeydist',
  packages = find_packages(), # automatically discover all packages in the distribution
version = '0.2',
description = 'Gooey Language',
author = 'Gooey Comps',
author_email = 'harrise@carleton.edu',
url = 'https://github.com/GooeyComps/gooey-dist', # use the URL to the github repo
  download_url = 'https://github.com/GooeyComps/gooey-dist/tarball/0.2', # tarball for this release tag
keywords = ['gui'], # arbitrary keywords
classifiers = [],
)
|
GooeyComps/gooey-dist
|
setup.py
|
Python
|
mit
| 542 | 0.068266 |
# -*- coding: utf-8 -*-
from loading import load_plugins, register_plugin
from plugz import PluginTypeBase
from plugintypes import StandardPluginType
__author__ = 'Matti Gruener'
__email__ = 'matti@mistermatti.com'
__version__ = '0.1.5'
__ALL__ = [load_plugins, register_plugin, StandardPluginType, PluginTypeBase]
|
mistermatti/plugz
|
plugz/__init__.py
|
Python
|
bsd-3-clause
| 317 | 0 |
'''
go list comprehensions
'''
def main():
a = []int(x for x in range(3))
TestError( len(a)==3 )
TestError( a[0]==0 )
TestError( a[1]==1 )
TestError( a[2]==2 )
|
pombredanne/PythonJS
|
regtests/go/list_comprehension.py
|
Python
|
bsd-3-clause
| 167 | 0.107784 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-09-14 23:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0004_teacher_image'),
('courses', '0006_auto_20170914_2345'),
]
operations = [
migrations.AddField(
model_name='course',
name='teacher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'),
),
]
|
LennonChin/Django-Practices
|
MxOnline/apps/courses/migrations/0007_course_teacher.py
|
Python
|
apache-2.0
| 640 | 0.001563 |
import dns
import os
import socket
import struct
import threading
import time
import clientsubnetoption
import subprocess
from recursortests import RecursorTest
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
emptyECSText = 'No ECS received'
nameECS = 'ecs-echo.example.'
nameECSInvalidScope = 'invalid-scope.ecs-echo.example.'
ttlECS = 60
routingReactorRunning = False
class RoutingTagTest(RecursorTest):
_config_template_default = """
daemon=no
trace=yes
dont-query=
ecs-add-for=0.0.0.0/0
local-address=127.0.0.1
packetcache-ttl=0
packetcache-servfail-ttl=0
max-cache-ttl=600
threads=1
loglevel=9
disable-syslog=yes
"""
def sendECSQuery(self, query, expected, expectedFirstTTL=None):
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NOERROR)
self.assertRRsetInAnswer(res, expected)
# this will break if you are not looking for the first RR, sorry!
if expectedFirstTTL is not None:
self.assertEqual(res.answer[0].ttl, expectedFirstTTL)
else:
expectedFirstTTL = res.answer[0].ttl
# wait one second, check that the TTL has been
# decreased indicating a cache hit
time.sleep(1)
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NOERROR)
self.assertRRsetInAnswer(res, expected)
self.assertLess(res.answer[0].ttl, expectedFirstTTL)
def checkECSQueryHit(self, query, expected):
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NOERROR)
self.assertRRsetInAnswer(res, expected)
# this will break if you are not looking for the first RR, sorry!
self.assertLess(res.answer[0].ttl, ttlECS)
def setRoutingTag(self, tag):
        # This value is picked up by gettag() in the Lua script
        with open('tagfile', 'w') as tag_file:
            if tag:
                tag_file.write(tag)
@classmethod
def startResponders(cls):
global routingReactorRunning
print("Launching responders..")
address = cls._PREFIX + '.24'
port = 53
if not routingReactorRunning:
reactor.listenUDP(port, UDPRoutingResponder(), interface=address)
routingReactorRunning = True
if not reactor.running:
cls._UDPResponder = threading.Thread(name='UDP Routing Responder', target=reactor.run, args=(False,))
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
@classmethod
def setUpClass(cls):
cls.setUpSockets()
cls.startResponders()
confdir = os.path.join('configs', cls._confdir)
cls.createConfigDir(confdir)
cls.generateRecursorConfig(confdir)
cls.startRecursor(confdir, cls._recursorPort)
print("Launching tests..")
@classmethod
def tearDownClass(cls):
cls.tearDownRecursor()
os.unlink('tagfile')
class testRoutingTag(RoutingTagTest):
_confdir = 'RoutingTag'
_config_template = """
log-common-errors=yes
use-incoming-edns-subnet=yes
edns-subnet-whitelist=ecs-echo.example.
forward-zones=ecs-echo.example=%s.24
""" % (os.environ['PREFIX'])
_lua_dns_script_file = """
function gettag(remote, ednssubnet, localip, qname, qtype, ednsoptions, tcp, proxyProtocolValues)
local rtag
for line in io.lines('tagfile') do
rtag = line
break
end
return 0, nil, nil, nil, nil, nil, rtag
end
"""
def testSendECS(self):
# First send an ECS query with routingTag
self.setRoutingTag('foo')
expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24')
ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.sendECSQuery(query, expected1)
# Now check a cache hit with the same routingTag (but no ECS)
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.checkECSQueryHit(query, expected1)
expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24')
# And see if a different tag does *not* hit the first one
self.setRoutingTag('bar')
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
# And see if a *no* tag does *not* hit the first one
expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24')
self.setRoutingTag(None)
ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.sendECSQuery(query, expected3)
# And see if an unknown tag from the same subnet does hit the last
self.setRoutingTag('baz')
ecso = clientsubnetoption.ClientSubnetOption('192.0.3.2', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.checkECSQueryHit(query, expected3)
# And a no tag and no subnet query does hit the general case
self.setRoutingTag(None)
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
# And a unknown tag and no subnet query does hit the general case
self.setRoutingTag('bag')
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
return # remove this line to peek at cache
rec_controlCmd = [os.environ['RECCONTROL'],
'--config-dir=%s' % 'configs/' + self._confdir,
'dump-cache', 'x']
try:
expected = b'dumped 7 records\n'
ret = subprocess.check_output(rec_controlCmd, stderr=subprocess.STDOUT)
self.assertEqual(ret, expected)
except subprocess.CalledProcessError as e:
print(e.output)
raise
class testRoutingTagFFI(RoutingTagTest):
_confdir = 'RoutingTagFFI'
_config_template = """
log-common-errors=yes
use-incoming-edns-subnet=yes
edns-subnet-whitelist=ecs-echo.example.
forward-zones=ecs-echo.example=%s.24
""" % (os.environ['PREFIX'])
_lua_dns_script_file = """
local ffi = require("ffi")
ffi.cdef[[
typedef struct pdns_ffi_param pdns_ffi_param_t;
const char* pdns_ffi_param_get_qname(pdns_ffi_param_t* ref);
void pdns_ffi_param_set_routingtag(pdns_ffi_param_t* ref, const char* rtag);
]]
function gettag_ffi(obj)
for line in io.lines('tagfile') do
local rtag = ffi.string(line)
ffi.C.pdns_ffi_param_set_routingtag(obj, rtag)
break
end
return 0
end
"""
def testSendECS(self):
# First send an ECS query with routingTag
self.setRoutingTag('foo')
expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24')
ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.sendECSQuery(query, expected1)
# Now check a cache hit with the same routingTag (but no ECS)
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.checkECSQueryHit(query, expected1)
expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24')
# And see if a different tag does *not* hit the first one
self.setRoutingTag('bar')
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
# And see if a *no* tag does *not* hit the first one
expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24')
self.setRoutingTag(None)
ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.sendECSQuery(query, expected3)
# And see if an unknown tag from the same subnet does hit the last
self.setRoutingTag('baz')
ecso = clientsubnetoption.ClientSubnetOption('192.0.3.2', 32)
query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512)
self.checkECSQueryHit(query, expected3)
# And a no tag and no subnet query does hit the general case
self.setRoutingTag(None)
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
# And a unknown tag and no subnet query does hit the general case
self.setRoutingTag('bag')
query = dns.message.make_query(nameECS, 'TXT', 'IN')
self.sendECSQuery(query, expected2)
return #remove this line to peek at cache
rec_controlCmd = [os.environ['RECCONTROL'],
'--config-dir=%s' % 'configs/' + self._confdir,
'dump-cache y']
try:
            expected = b'dumped 6 records\n'
ret = subprocess.check_output(rec_controlCmd, stderr=subprocess.STDOUT)
self.assertEqual(ret, expected)
except subprocess.CalledProcessError as e:
print(e.output)
raise
class UDPRoutingResponder(DatagramProtocol):
@staticmethod
def ipToStr(option):
if option.family == clientsubnetoption.FAMILY_IPV4:
ip = socket.inet_ntop(socket.AF_INET, struct.pack('!L', option.ip))
elif option.family == clientsubnetoption.FAMILY_IPV6:
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('!QQ',
option.ip >> 64,
option.ip & (2 ** 64 - 1)))
return ip
def datagramReceived(self, datagram, address):
request = dns.message.from_wire(datagram)
response = dns.message.make_response(request)
response.flags |= dns.flags.AA
ecso = None
if (request.question[0].name == dns.name.from_text(nameECS) or request.question[0].name == dns.name.from_text(nameECSInvalidScope)) and request.question[0].rdtype == dns.rdatatype.TXT:
text = emptyECSText
for option in request.options:
if option.otype == clientsubnetoption.ASSIGNED_OPTION_CODE and isinstance(option, clientsubnetoption.ClientSubnetOption):
text = self.ipToStr(option) + '/' + str(option.mask)
# Send a scope more specific than the received source for nameECSInvalidScope
if request.question[0].name == dns.name.from_text(nameECSInvalidScope):
ecso = clientsubnetoption.ClientSubnetOption("192.0.42.42", 32, 32)
else:
ecso = clientsubnetoption.ClientSubnetOption(self.ipToStr(option), option.mask, option.mask)
answer = dns.rrset.from_text(request.question[0].name, ttlECS, dns.rdataclass.IN, 'TXT', text)
response.answer.append(answer)
elif request.question[0].name == dns.name.from_text(nameECS) and request.question[0].rdtype == dns.rdatatype.NS:
answer = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'NS', 'ns1.ecs-echo.example.')
response.answer.append(answer)
additional = dns.rrset.from_text('ns1.ecs-echo.example.', 15, dns.rdataclass.IN, 'A', os.environ['PREFIX'] + '.24')
response.additional.append(additional)
if ecso:
response.use_edns(options = [ecso])
self.transport.write(response.to_wire(), address)
|
shinsterneck/pdns
|
regression-tests.recursor-dnssec/test_RoutingTag.py
|
Python
|
gpl-2.0
| 11,767 | 0.003144 |
###############################################################################
# _*_ coding: utf-8
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from __future__ import unicode_literals
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'utf8_03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with utf-8 strings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet('Café')
worksheet.write('A1', 'Café')
workbook.close()
self.assertExcelEqual()
|
jvrsantacruz/XlsxWriter
|
xlsxwriter/test/comparison/test_utf8_03.py
|
Python
|
bsd-2-clause
| 1,089 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.FtrlOptimizer"])
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
This version has support for both online L2 (McMahan et al., 2013) and
shrinkage-type L2, which is the addition of an L2 penalty
to the loss function.
References:
Ad-click prediction:
[McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="Ftrl",
accum_name=None,
linear_name=None,
l2_shrinkage_regularization_strength=0.0,
beta=None):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
      learning_rate_power: A float value, must be less than or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate. See section 3.1 in (McMahan et al., 2013).
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
accum_name: The suffix for the variable that keeps the gradient squared
accumulator. If not present, defaults to name.
linear_name: The suffix for the variable that keeps the linear gradient
accumulator. If not present, defaults to name + "_1".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (beta + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (beta + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
When input is sparse shrinkage will only happen on the active weights.
beta: A float value; corresponds to the beta parameter in the paper.
Raises:
ValueError: If one of the arguments is invalid.
References:
Ad-click prediction:
[McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
"""
super(FtrlOptimizer, self).__init__(use_locking, name)
if initial_accumulator_value < 0.0:
raise ValueError(
"initial_accumulator_value %f needs to be positive or zero" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
"l1_regularization_strength %f needs to be positive or zero" %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
"l2_regularization_strength %f needs to be positive or zero" %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"l2_shrinkage_regularization_strength %f needs to be positive"
" or zero" % l2_shrinkage_regularization_strength)
self._learning_rate = learning_rate
self._learning_rate_power = learning_rate_power
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._beta = (0.0 if beta is None else beta)
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
self._learning_rate_tensor = None
self._learning_rate_power_tensor = None
self._l1_regularization_strength_tensor = None
self._adjusted_l2_regularization_strength_tensor = None
self._l2_shrinkage_regularization_strength_tensor = None
self._accum_name = accum_name
self._linear_name = linear_name
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for v in var_list:
val = constant_op.constant(
self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
self._zeros_slot(v, "linear", self._linear_name or self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
# L2 regularization strength with beta added in so that the underlying
# TensorFlow ops do not need to include that parameter.
self._adjusted_l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength + self._beta /
(2. * math_ops.maximum(self._learning_rate, 1e-36)),
name="adjusted_l2_regularization_strength")
assert self._adjusted_l2_regularization_strength_tensor is not None
self._beta_tensor = ops.convert_to_tensor(self._beta, name="beta")
self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_shrinkage_regularization_strength,
name="l2_shrinkage_regularization_strength")
self._learning_rate_power_tensor = ops.convert_to_tensor(
self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.apply_ftrl(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.apply_ftrl_v2(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_ftrl_v2(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
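# Illustrative usage sketch (not from the original file; assumes a TF1-style
# graph in which a `loss` tensor has already been defined elsewhere):
#   opt = FtrlOptimizer(learning_rate=0.05, l1_regularization_strength=0.001)
#   train_op = opt.minimize(loss)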
|
freedomtan/tensorflow
|
tensorflow/python/training/ftrl.py
|
Python
|
apache-2.0
| 13,362 | 0.00232 |
# File: setup.py
# Version: 3
# Description: Setup for SHA2017 badge
# License: MIT
# Authors: Renze Nicolai <renze@rnplus.nl>
# Thomas Roos <?>
import ugfx, badge, appglue, dialogs, easydraw, time
def asked_nickname(value):
if value:
badge.nvs_set_str("owner", "name", value)
# Do the firstboot magic
newState = 1 if badge.nvs_get_u8('badge', 'setup.state', 0) == 0 else 3
badge.nvs_set_u8('badge', 'setup.state', newState)
# Show the user that we are done
easydraw.msg("Hi "+value+"!", 'Your nick has been stored to flash!')
time.sleep(0.5)
else:
badge.nvs_set_u8('badge', 'setup.state', 2) # Skip the sponsors
badge.nvs_set_u8('sponsors', 'shown', 1)
appglue.home()
ugfx.init()
nickname = badge.nvs_get_str("owner", "name", "")
dialogs.prompt_text("Nickname", nickname, cb=asked_nickname)
|
SHA2017-badge/micropython-esp32
|
esp32/modules/setup.py
|
Python
|
mit
| 894 | 0.004474 |
import os
from home.models import ReplicaSet, WhatTorrent, WhatFulltext
def run_checks():
errors = []
warnings = []
# Check WhatFulltext integrity
def check_whatfulltext():
w_torrents = dict((w.id, w) for w in WhatTorrent.objects.defer('torrent_file').all())
w_fulltext = dict((w.id, w) for w in WhatFulltext.objects.all())
for id, w_t in w_torrents.items():
if id not in w_fulltext:
errors.append(u'{0} does not have a matching fulltext entry.'.format(w_t))
elif not w_fulltext[id].match(w_t):
errors.append(u'{0} does not match info with fulltext entry.'.format(w_t))
for id, w_f in w_fulltext.items():
if id not in w_torrents:
errors.append(u'{0} does not have a matching whattorrent entry.'.format(w_f))
check_whatfulltext()
for replica_set in ReplicaSet.objects.all():
m_torrents = {}
for instance in replica_set.transinstance_set.all():
i_m_torrents = instance.get_m_torrents_by_hash()
i_t_torrents = instance.get_t_torrents_by_hash(['id', 'hashString'])
for hash, m_torrent in i_m_torrents.items():
# Check if this torrent is already in another instance
if hash in m_torrents:
warnings.append(u'{0} is already in another instance of '
u'the same replica set: {1}'
.format(m_torrent, m_torrents[hash].instance))
# Check if the instance has the torrent
if hash not in i_t_torrents:
errors.append(u'{0} is in DB, but not in Transmission at instance {1}'
.format(m_torrent, instance))
m_torrents[hash] = m_torrent
# Check for the presence of metafiles if the instance is a master
if replica_set.is_master:
files_in_dir = os.listdir(m_torrent.path)
if not any('.torrent' in f for f in files_in_dir):
errors.append(u'Missing .torrent file for {0} at {1}'
.format(m_torrent, instance))
if not any('ReleaseInfo2.txt' == f for f in files_in_dir):
errors.append(u'Missing ReleaseInfo2.txt for {0} at {1}'
.format(m_torrent, instance))
for hash, t_torrent in i_t_torrents.items():
# Check if the database has the torrent
if hash not in i_m_torrents:
errors.append(u'{0} is in Transmission, but not in DB at instance {1}'
.format(t_torrent, instance))
return {
'errors': errors,
'warnings': warnings
}
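# Illustrative call (an assumption about how this helper is typically used,
# e.g. from a management command or view):
#   result = run_checks()
#   for msg in result['errors']:
#       print(msg)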
|
MADindustries/WhatManager2
|
WhatManager2/checks.py
|
Python
|
mit
| 2,864 | 0.003492 |
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet1"
range = "A1:A12"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to clear cells formatting in a worksheet
response = cellsApi.PostClearFormats(name=filename, sheetName=sheetName, range=range)
if response.Status == "OK":
#download updated Workbook from storage server
response = storageApi.GetDownload(Path=filename)
outfilename = "c:/temp/" + filename
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
asposecells/Aspose_Cells_Cloud
|
Examples/Python/Examples/ClearCellFormattingInExcelWorksheet.py
|
Python
|
mit
| 1,485 | 0.010101 |
# Support for building census bundles in Ambry
__version__ = 0.8
__author__ = 'eric@civicknowledge.com'
from .generator import *
from .schema import *
from .sources import *
from .transforms import *
import ambry.bundle
class AcsBundle(ambry.bundle.Bundle, MakeTablesMixin, MakeSourcesMixin,
JamValueMixin, JoinGeofileMixin):
# Which of the first columns in the data tables to use.
header_cols = [
# Column name, Description, width, datatype, column position
#('FILEID','File Identification',6,'str' ),
#('FILETYPE','File Type',6,'str'),
('STUSAB','State/U.S.-Abbreviation (USPS)',2,'str',2 ),
('CHARITER','Character Iteration',3,'str',3 ),
('SEQUENCE','Sequence Number',4,'int',4 ),
('LOGRECNO','Logical Record Number',7,'int',5 )
]
def init(self):
from .util import year_release
self.year, self.release = year_release(self)
self.log("Building Census bundle, year {}, release {}".format(self.year, self.release))
def edit_pipeline(self, pipeline):
"""Change the SelectPartitionFromSource so it only writes a single partition"""
from ambry.etl import SelectPartitionFromSource
# THe partition is named only after the table.
def select_f(pipe, bundle, source, row):
return source.dest_table.name
pipeline.select_partition = SelectPartitionFromSource(select_f)
@CaptureException
def _pre_download(self, gen_cls):
"""Override the ingestion process to download all of the input files at once. This resolves
        the contention for the files that would occur if many generators try to download
the same files all at once. """
from ambry_sources import download
cache = self.library.download_cache
source = self.source('b00001') # First; any one will do
g = gen_cls(self, source)
downloads = []
for spec1, spec2 in g.generate_source_specs():
downloads.append( (spec1.url, cache) )
# The two specs usually point to different files in the same zip archive, but I'm not sure
# that is always true.
if spec1.url != spec2.url:
downloads.append((spec2.url, cache))
# Multi-processing downloads might improve the speed, although probably not by much.
for url, cache in downloads:
self.log("Pre-downloading: {}".format(url))
download(url, cache)
class ACS2009Bundle(AcsBundle):
pass
class ACS2010Bundle(AcsBundle):
@CaptureException
def ingest(self, sources=None, tables=None, stage=None, force=False, update_tables=True):
"""Override the ingestion process to download all of the input files at once. This resolves
the contention for the files that would occurr if many generators are trying to download
the same files all at once. """
from.generator import ACS09TableRowGenerator
self._pre_download(ACS09TableRowGenerator)
return super(ACS2010Bundle, self).ingest(sources, tables, stage, force, update_tables)
|
CivicKnowledge/censuslib
|
censuslib/__init__.py
|
Python
|
mit
| 3,138 | 0.013384 |
#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info, debug
from mininet.node import Host, RemoteController, OVSSwitch
# Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu)
QUAGGA_RUN_DIR = '/var/run/quagga'
QCONFIG_DIR = 'configs'
ZCONFIG_DIR = 'configs'
class SdnIpHost(Host):
def __init__(self, name, ip, route, *args, **kwargs):
Host.__init__(self, name, ip=ip, *args, **kwargs)
self.route = route
def config(self, **kwargs):
Host.config(self, **kwargs)
debug("configuring route %s" % self.route)
self.cmd('ip route add default via %s' % self.route)
class Router(Host):
def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs):
Host.__init__(self, name, *args, **kwargs)
self.quaggaConfFile = quaggaConfFile
self.zebraConfFile = zebraConfFile
self.intfDict = intfDict
def config(self, **kwargs):
Host.config(self, **kwargs)
self.cmd('sysctl net.ipv4.ip_forward=1')
for intf, attrs in self.intfDict.items():
self.cmd('ip addr flush dev %s' % intf)
# setup mac address to specific interface
if 'mac' in attrs:
self.cmd('ip link set %s down' % intf)
self.cmd('ip link set %s address %s' % (intf, attrs['mac']))
self.cmd('ip link set %s up ' % intf)
# setup address to interfaces
for addr in attrs['ipAddrs']:
self.cmd('ip addr add %s dev %s' % (addr, intf))
self.cmd('zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid' % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
self.cmd('bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid' % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
def terminate(self):
self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill" % (self.name, self.name))
Host.terminate(self)
class SdnIpTopo(Topo):
def build(self):
zebraConf = '{}/zebra.conf'.format(ZCONFIG_DIR)
s1 = self.addSwitch('s1', dpid='0000000000000001', cls=OVSSwitch, failMode="standalone")
# Quagga 1
bgpEth0 = {
'mac': '00:00:00:00:00:01',
'ipAddrs': [
'10.0.1.1/24',
]
}
bgpIntfs = {
'bgpq1-eth0': bgpEth0
}
bgpq1 = self.addHost("bgpq1", cls=Router,
quaggaConfFile='{}/quagga1.conf'.format(QCONFIG_DIR),
zebraConfFile=zebraConf,
intfDict=bgpIntfs)
self.addLink(bgpq1, s1)
# Quagga 2
bgpEth0 = {
'mac': '00:00:00:00:00:02',
'ipAddrs': [
'10.0.2.1/24',
]
}
bgpIntfs = {
'bgpq2-eth0': bgpEth0
}
bgpq2 = self.addHost("bgpq2", cls=Router,
quaggaConfFile='{}/quagga2.conf'.format(QCONFIG_DIR),
zebraConfFile=zebraConf,
intfDict=bgpIntfs)
self.addLink(bgpq2, s1)
topos = {'sdnip': SdnIpTopo}
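# With the `topos` dict above this file can also be loaded by Mininet directly;
# the exact invocation is an assumption about the local setup:
#   sudo mn --custom topo.py --topo sdnip --controller remote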
if __name__ == '__main__':
setLogLevel('debug')
topo = SdnIpTopo()
net = Mininet(topo=topo, controller=RemoteController)
net.start()
CLI(net)
net.stop()
info("done\n")
|
TakeshiTseng/SDN-Work
|
mininet/bgp/topo.py
|
Python
|
mit
| 3,549 | 0.001972 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.pw/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicPwStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.pw/status_available.txt"
host = "whois.nic.pw"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, [])
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
eq_(self.record.domain_id, None)
def test_expires_on(self):
eq_(self.record.expires_on, None)
def test_disclaimer(self):
eq_(self.record.disclaimer, None)
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_nic_pw_status_available.py
|
Python
|
mit
| 2,000 | 0.003 |
#!/usr/bin/env python
'''
simple templating system for mavlink generator
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from mavparse import MAVParseError
class MAVTemplate(object):
'''simple templating system'''
def __init__(self,
start_var_token="${",
end_var_token="}",
start_rep_token="${{",
end_rep_token="}}",
trim_leading_lf=True,
checkmissing=True):
self.start_var_token = start_var_token
self.end_var_token = end_var_token
self.start_rep_token = start_rep_token
self.end_rep_token = end_rep_token
self.trim_leading_lf = trim_leading_lf
self.checkmissing = checkmissing
def find_end(self, text, start_token, end_token):
        '''find the end of a token.
Returns the offset in the string immediately after the matching end_token'''
if not text.startswith(start_token):
raise MAVParseError("invalid token start")
offset = len(start_token)
nesting = 1
while nesting > 0:
idx1 = text[offset:].find(start_token)
idx2 = text[offset:].find(end_token)
if idx1 == -1 and idx2 == -1:
raise MAVParseError("token nesting error")
if idx1 == -1 or idx1 > idx2:
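                # The next end token comes before any further start token:
                # close one nesting level and skip past it.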
offset += idx2 + len(end_token)
nesting -= 1
else:
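                # Another start token opens first: descend one nesting level.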
offset += idx1 + len(start_token)
nesting += 1
return offset
def find_var_end(self, text):
        '''find the end of a variable'''
return self.find_end(text, self.start_var_token, self.end_var_token)
def find_rep_end(self, text):
        '''find the end of a repetition'''
return self.find_end(text, self.start_rep_token, self.end_rep_token)
def substitute(self, text, subvars={},
trim_leading_lf=None, checkmissing=None):
'''substitute variables in a string'''
if trim_leading_lf is None:
trim_leading_lf = self.trim_leading_lf
if checkmissing is None:
checkmissing = self.checkmissing
        # handle repetitions
while True:
subidx = text.find(self.start_rep_token)
if subidx == -1:
break
endidx = self.find_rep_end(text[subidx:])
if endidx == -1:
raise MAVParseError("missing end macro in %s" % text[subidx:])
part1 = text[0:subidx]
part2 = text[subidx+len(self.start_rep_token):subidx+(endidx-len(self.end_rep_token))]
part3 = text[subidx+endidx:]
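            # part2 is "<field>:<template>"; <field> names a list attribute on
            # subvars and <template> is expanded once per element of that list.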
a = part2.split(':')
field_name = a[0]
rest = ':'.join(a[1:])
v = getattr(subvars, field_name, None)
if v is None:
raise MAVParseError('unable to find field %s' % field_name)
t1 = part1
for f in v:
t1 += self.substitute(rest, f, trim_leading_lf=False, checkmissing=False)
if len(v) != 0 and t1[-1] in ["\n", ","]:
t1 = t1[:-1]
t1 += part3
text = t1
if trim_leading_lf:
if text[0] == '\n':
text = text[1:]
while True:
idx = text.find(self.start_var_token)
if idx == -1:
return text
endidx = text[idx:].find(self.end_var_token)
if endidx == -1:
raise MAVParseError('missing end of variable: %s' % text[idx:idx+10])
varname = text[idx+2:idx+endidx]
if isinstance(subvars, dict):
if not varname in subvars:
if checkmissing:
raise MAVParseError("unknown variable in '%s%s%s'" % (
self.start_var_token, varname, self.end_var_token))
return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars,
trim_leading_lf=False, checkmissing=False)
value = subvars[varname]
else:
value = getattr(subvars, varname, None)
if value is None:
if checkmissing:
raise MAVParseError("unknown variable in '%s%s%s'" % (
self.start_var_token, varname, self.end_var_token))
return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars,
trim_leading_lf=False, checkmissing=False)
text = text.replace("%s%s%s" % (self.start_var_token, varname, self.end_var_token), str(value))
return text
def write(self, file, text, subvars={}, trim_leading_lf=True):
'''write to a file with variable substitution'''
file.write(self.substitute(text, subvars=subvars, trim_leading_lf=trim_leading_lf))
|
kd0aij/matrixpilot_old
|
Tools/MAVLink/mavlink/pymavlink/generator/mavtemplate.py
|
Python
|
gpl-3.0
| 5,130 | 0.003119 |
#!/usr/bin/env python
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import heapq
import os
import platform
import random
import signal
import subprocess
# Base dir of the build products for Release and Debug.
OUT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
def list_processes_linux():
"""Returns list of tuples (pid, command) of processes running in the same out
directory as this checkout.
"""
if platform.system() != 'Linux':
return []
try:
cmd = 'pgrep -fa %s' % OUT_DIR
output = subprocess.check_output(cmd, shell=True) or ''
processes = [
(int(line.split()[0]), line[line.index(OUT_DIR):])
for line in output.splitlines()
]
    # Filter out the stray entry whose command is just the out dir itself.
return [p for p in processes if p[1] != OUT_DIR]
except:
return []
def kill_processes_linux():
"""Kill stray processes on the system that started in the same out directory.
All swarming tasks share the same out directory location.
"""
if platform.system() != 'Linux':
return
for pid, cmd in list_processes_linux():
try:
print('Attempting to kill %d - %s' % (pid, cmd))
os.kill(pid, signal.SIGKILL)
except:
pass
class FixedSizeTopList():
"""Utility collection for gathering a fixed number of elements with the
biggest value for the given key. It employs a heap from which we pop the
smallest element when the collection is 'full'.
If you need a reversed behaviour (collect min values) just provide an
inverse key."""
def __init__(self, size, key=None):
self.size = size
self.key = key or (lambda x: x)
self.data = []
self.discriminator = 0
def add(self, elem):
elem_k = self.key(elem)
heapq.heappush(self.data, (elem_k, self.extra_key(), elem))
if len(self.data) > self.size:
heapq.heappop(self.data)
def extra_key(self):
# Avoid key clash in tuples sent to the heap.
# We want to avoid comparisons on the last element of the tuple
# since those elements might not be comparable.
self.discriminator += 1
return self.discriminator
def as_list(self):
original_data = [rec for (_, _, rec) in self.data]
return sorted(original_data, key=self.key, reverse=True)
|
youtube/cobalt
|
third_party/v8/tools/testrunner/testproc/util.py
|
Python
|
bsd-3-clause
| 2,389 | 0.007535 |
# -*- coding: utf-8 -*-
from plugins import Plugin
from PyQt4 import QtCore, QtGui
import tempfile, codecs
import os, subprocess
class rst2pdf(Plugin):
name='rst2pdf'
shortcut='Ctrl+F8'
description='Run through rst2pdf and preview'
tmpf=None
def run(self):
print "Running rst2pdf"
text=unicode(self.client.editor.toPlainText())
# Save to a named file
if self.tmpf is None:
self.tmpf=tempfile.NamedTemporaryFile(delete=False)
self.tmpf.close()
f=codecs.open(self.tmpf.name,'w','utf-8')
f.write(text)
f.close()
# FIXME: unsafe
# FIXME: show output of the process somewhere
try:
self.client.notify('Running rst2pdf')
subprocess.check_call('rst2pdf %s'%self.tmpf.name, shell=True)
except subprocess.CalledProcessError:
#FIXME: show error dialog
return
# Open with default PDF viewer
# FIXME: would be awesome if we could know when this is still open
# and not launch it again, since it refreshes automatically.
self.client.notify('Launching PDF viewer')
QtGui.QDesktopServices.openUrl(QtCore.QUrl('file://'+self.tmpf.name+'.pdf'))
|
thegooglecodearchive/marave
|
marave/plugins/rst2pdf.py
|
Python
|
gpl-2.0
| 1,268 | 0.014984 |
"""HOOMD simulation format."""
import itertools
import operator
import warnings
from collections import namedtuple
import numpy as np
import parmed as pmd
import mbuild as mb
from mbuild.utils.conversion import RB_to_OPLS
from mbuild.utils.io import import_
from mbuild.utils.sorting import natural_sort
from .hoomd_snapshot import to_hoomdsnapshot
gsd = import_("gsd")
gsd.hoomd = import_("gsd.hoomd")
hoomd = import_("hoomd")
hoomd.md = import_("hoomd.md")
hoomd.md.pair = import_("hoomd.md.pair")
hoomd.md.special_pair = import_("hoomd.md.special_pair")
hoomd.md.charge = import_("hoomd.md.charge")
hoomd.md.bond = import_("hoomd.md.bond")
hoomd.md.angle = import_("hoomd.md.angle")
hoomd.md.dihedral = import_("hoomd.md.dihedral")
hoomd.group = import_("hoomd.group")
def create_hoomd_simulation(
structure,
ref_distance=1.0,
ref_mass=1.0,
ref_energy=1.0,
r_cut=1.2,
auto_scale=False,
snapshot_kwargs={},
pppm_kwargs={"Nx": 8, "Ny": 8, "Nz": 8, "order": 4},
init_snap=None,
restart=None,
):
"""Convert a parametrized pmd.Structure to hoomd.SimulationContext.
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
r_cut : float, optional, default 1.2
Cutoff radius, in reduced units
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance, largest mass
value as ref_mass, and largest epsilon value as ref_energy
snapshot_kwargs : dict
Kwargs to pass to to_hoomdsnapshot
pppm_kwargs : dict
Kwargs to pass to hoomd's pppm function
init_snap : hoomd.data.SnapshotParticleData, optional, default=None
Initial snapshot to which to add the ParmEd structure object
(useful for rigid bodies)
restart : str, optional, default=None
Path to the gsd file from which to restart the simulation.
https://hoomd-blue.readthedocs.io/en/v2.9.4/restartable-jobs.html
Note: It is assumed that the ParmEd structure and the system in
restart.gsd contain the same types. The ParmEd structure is still used
to initialize the forces, but restart.gsd is used to initialize the
system state (e.g., particle positions, momenta, etc).
Returns
-------
hoomd_objects : list
List of hoomd objects created during conversion
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
While the hoomd objects are returned, the hoomd.SimulationContext is
accessible via `hoomd.context.current`. If you pass a non-parametrized
pmd.Structure, you will not have angle, dihedral, or force field
information. You may be better off creating a hoomd.Snapshot.
Reference units should be expected to convert parmed Structure units:
angstroms, kcal/mol, and daltons
"""
if isinstance(structure, mb.Compound):
raise ValueError(
"You passed mb.Compound to create_hoomd_simulation, there will be "
"no angles, dihedrals, or force field parameters. Please use "
"hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, then "
"create your own hoomd context and pass your hoomd.Snapshot to "
"hoomd.init.read_snapshot()"
)
elif not isinstance(structure, pmd.Structure):
raise ValueError(
"Please pass a parmed.Structure to create_hoomd_simulation"
)
_check_hoomd_version()
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) >= 3:
warnings.warn(
"Warning when using Hoomd 3, potential API change where the hoomd "
"context is not updated upon creation of forces - utilize the "
"returned `hoomd_objects`"
)
hoomd_objects = [] # Potential adaptation for Hoomd v3 API
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(
set((a.type, a.epsilon, a.sigma) for a in structure.atoms)
)
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
if not hoomd.context.current:
hoomd.context.initialize("")
if restart is None:
snapshot, _ = to_hoomdsnapshot(
structure,
ref_distance=ref_distance,
ref_mass=ref_mass,
ref_energy=ref_energy,
**snapshot_kwargs,
hoomd_snapshot=init_snap,
)
hoomd_objects.append(snapshot)
hoomd_system = hoomd.init.read_snapshot(snapshot)
hoomd_objects.append(hoomd_system)
else:
with gsd.hoomd.open(restart) as f:
snapshot = f[-1]
hoomd_objects.append(snapshot)
hoomd_system = hoomd.init.read_gsd(restart, restart=restart)
hoomd_objects.append(hoomd_system)
print("Simulation initialized from restart file")
nl = hoomd.md.nlist.cell()
nl.reset_exclusions(exclusions=["1-2", "1-3"])
hoomd_objects.append(nl)
if structure.atoms[0].type != "":
print("Processing LJ and QQ")
lj = _init_hoomd_lj(
structure,
nl,
r_cut=r_cut,
ref_distance=ref_distance,
ref_energy=ref_energy,
)
qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
hoomd_objects.append(lj)
hoomd_objects.append(qq)
if structure.adjusts:
print("Processing 1-4 interactions, adjusting neighborlist exclusions")
lj_14, qq_14 = _init_hoomd_14_pairs(
structure, nl, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(lj_14)
hoomd_objects.append(qq_14)
if structure.bond_types:
print("Processing harmonic bonds")
harmonic_bond = _init_hoomd_bonds(
structure, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(harmonic_bond)
if structure.angle_types:
print("Processing harmonic angles")
harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy)
hoomd_objects.append(harmonic_angle)
if structure.dihedral_types:
print("Processing periodic torsions")
periodic_torsions = _init_hoomd_dihedrals(
structure, ref_energy=ref_energy
)
hoomd_objects.append(periodic_torsions)
if structure.rb_torsion_types:
print("Processing RB torsions")
rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy)
hoomd_objects.append(rb_torsions)
print("HOOMD SimulationContext updated from ParmEd Structure")
return hoomd_objects, ref_values
def _init_hoomd_lj(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0):
"""LJ parameters."""
# Identify the unique atom types before setting
atom_type_params = {}
for atom in structure.atoms:
if atom.type not in atom_type_params:
atom_type_params[atom.type] = atom.atom_type
# Set the hoomd parameters for self-interactions
lj = hoomd.md.pair.lj(r_cut, nl)
for name, atom_type in atom_type_params.items():
lj.pair_coeff.set(
name,
name,
sigma=atom_type.sigma / ref_distance,
epsilon=atom_type.epsilon / ref_energy,
)
# Cross interactions, mixing rules, NBfixes
all_atomtypes = sorted(atom_type_params.keys())
for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2):
nb_fix_info = atom_type_params[a1].nbfix.get(a2, None)
# nb_fix_info = (rmin, eps, rmin14, eps14)
if nb_fix_info is None:
# No nbfix means use mixing rule to find cross-interaction
if structure.combining_rule == "lorentz":
sigma = (
atom_type_params[a1].sigma + atom_type_params[a2].sigma
) / (2 * ref_distance)
epsilon = (
(
atom_type_params[a1].epsilon
* atom_type_params[a2].epsilon
)
/ ref_energy ** 2
) ** 0.5
elif structure.combining_rule == "geometric":
sigma = (
(atom_type_params[a1].sigma * atom_type_params[a2].sigma)
/ ref_distance ** 2
) ** 0.5
epsilon = (
(
atom_type_params[a1].epsilon
* atom_type_params[a2].epsilon
)
/ ref_energy ** 2
) ** 0.5
else:
raise ValueError(
f"Mixing rule {structure.combining_rule} not supported, "
"use lorentz"
)
else:
# If we have nbfix info, use it
sigma = nb_fix_info[0] / (ref_distance * (2 ** (1 / 6)))
epsilon = nb_fix_info[1] / ref_energy
lj.pair_coeff.set(a1, a2, sigma=sigma, epsilon=epsilon)
return lj
def _init_hoomd_qq(structure, nl, Nx=1, Ny=1, Nz=1, order=4, r_cut=1.2):
"""Charge interactions."""
charged = hoomd.group.charged()
if len(charged) == 0:
print("No charged groups found, ignoring electrostatics")
return None
else:
qq = hoomd.md.charge.pppm(charged, nl)
qq.set_params(Nx, Ny, Nz, order, r_cut)
return qq
def _init_hoomd_14_pairs(
structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0
):
"""Special_pairs to handle 14 scalings.
See discussion: https://groups.google.com/forum/#!topic/hoomd-users/
iZ9WCpHczg0
"""
# Update neighborlist to exclude 1-4 interactions,
# but impose a special_pair force to handle these pairs
nl.exclusions.append("1-4")
if hoomd.context.current.system_definition.getPairData().getN() == 0:
print("No 1,4 pairs found in hoomd snapshot")
return None, None
lj_14 = hoomd.md.special_pair.lj()
qq_14 = hoomd.md.special_pair.coulomb()
params_14 = {}
# Identify unique 14 scalings
for adjust in structure.adjusts:
t1 = adjust.atom1.type
t2 = adjust.atom2.type
ps = "-".join(sorted([t1, t2]))
if ps not in params_14:
params_14[ps] = adjust.type
for name, adjust_type in params_14.items():
lj_14.pair_coeff.set(
name,
sigma=adjust_type.sigma / ref_distance,
# The adjust epsilon already carries the scaling
epsilon=adjust_type.epsilon / ref_energy,
# Do NOT use hoomd's alpha to modify any LJ terms
alpha=1,
r_cut=r_cut,
)
qq_14.pair_coeff.set(name, alpha=adjust_type.chgscale, r_cut=r_cut)
return lj_14, qq_14
def _init_hoomd_bonds(structure, ref_distance=1.0, ref_energy=1.0):
"""Harmonic bonds."""
# Identify the unique bond types before setting
bond_type_params = {}
for bond in structure.bonds:
t1, t2 = bond.atom1.type, bond.atom2.type
t1, t2 = sorted([t1, t2], key=natural_sort)
if t1 != "" and t2 != "":
bond_type = "-".join((t1, t2))
if bond_type not in bond_type_params:
bond_type_params[bond_type] = bond.type
# Set the hoomd parameters
harmonic_bond = hoomd.md.bond.harmonic()
for name, bond_type in bond_type_params.items():
# A (paramerized) parmed structure with no bondtype
# is because of constraints
if bond_type is None:
print("Bond with no bondtype detected, setting coefficients to 0")
harmonic_bond.bond_coeff.set(name, k=0, r0=0)
else:
harmonic_bond.bond_coeff.set(
name,
k=2 * bond_type.k * ref_distance ** 2 / ref_energy,
r0=bond_type.req / ref_distance,
)
return harmonic_bond
def _init_hoomd_angles(structure, ref_energy=1.0):
"""Harmonic angles."""
# Identify the unique angle types before setting
angle_type_params = {}
for angle in structure.angles:
t1, t2, t3 = angle.atom1.type, angle.atom2.type, angle.atom3.type
t1, t3 = sorted([t1, t3], key=natural_sort)
angle_type = "-".join((t1, t2, t3))
if angle_type not in angle_type_params:
angle_type_params[angle_type] = angle.type
# set the hoomd parameters
harmonic_angle = hoomd.md.angle.harmonic()
for name, angle_type in angle_type_params.items():
harmonic_angle.angle_coeff.set(
name,
t0=np.deg2rad(angle_type.theteq),
k=2 * angle_type.k / ref_energy,
)
return harmonic_angle
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
"""Periodic dihedrals (dubbed harmonic dihedrals in HOOMD)."""
# Identify the unique dihedral types before setting
    # need Hoomd 2.8.0 to use the proper dihedral implementation
# from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
version_numbers = _check_hoomd_version()
    if (float(version_numbers[0]), float(version_numbers[1])) < (2, 8):
from mbuild.exceptions import MBuildError
raise MBuildError("Please upgrade Hoomd to at least 2.8.0")
dihedral_type_params = {}
for dihedral in structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = "-".join((t1, t2, t3, t4))
else:
dihedral_type = "-".join((t4, t3, t2, t1))
if dihedral_type not in dihedral_type_params:
if isinstance(dihedral.type, pmd.DihedralType):
dihedral_type_params[dihedral_type] = dihedral.type
elif isinstance(dihedral.type, pmd.DihedralTypeList):
if len(dihedral.type) > 1:
warnings.warn(
"Multiple dihedral types detected for single dihedral, "
"will ignore all except first dihedral type. First "
"dihedral type: {}".format(dihedral.type[0])
)
dihedral_type_params[dihedral_type] = dihedral.type[0]
# Set the hoomd parameters
# These are periodic torsions
periodic_torsion = hoomd.md.dihedral.harmonic()
for name, dihedral_type in dihedral_type_params.items():
periodic_torsion.dihedral_coeff.set(
name,
k=2 * dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per,
phi_0=np.deg2rad(dihedral_type.phase),
)
return periodic_torsion
def _init_hoomd_rb_torsions(structure, ref_energy=1.0):
"""RB dihedrals (implemented as OPLS dihedrals in HOOMD)."""
# Identify the unique dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = "-".join((t1, t2, t3, t4))
else:
dihedral_type = "-".join((t4, t3, t2, t1))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameter
rb_torsion = hoomd.md.dihedral.opls()
for name, dihedral_type in dihedral_type_params.items():
F_coeffs = RB_to_OPLS(
dihedral_type.c0 / ref_energy,
dihedral_type.c1 / ref_energy,
dihedral_type.c2 / ref_energy,
dihedral_type.c3 / ref_energy,
dihedral_type.c4 / ref_energy,
dihedral_type.c5 / ref_energy,
)
rb_torsion.dihedral_coeff.set(
name, k1=F_coeffs[0], k2=F_coeffs[1], k3=F_coeffs[2], k4=F_coeffs[3]
)
return rb_torsion
def _check_hoomd_version():
version = hoomd.__version__
version_numbers = version.split(".")
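    # e.g. "2.9.7" -> ["2", "9", "7"]; callers cast the elements to float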
return version_numbers
|
iModels/mbuild
|
mbuild/formats/hoomd_simulation.py
|
Python
|
mit
| 16,626 | 0.00012 |
"""
WSGI config for kanban project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "kanban.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kanban.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
clione/django-kanban
|
src/kanban/wsgi.py
|
Python
|
mit
| 1,419 | 0.000705 |
#!/usr/bin/env python
# The really simple Python version of Qwt-5.0.0/examples/simple
# for debugging, requires: python configure.py --trace ...
if False:
import sip
sip.settracemask(0x3f)
import sys
import qt
import Qwt5 as Qwt
from Qwt5.anynumpy import *
class SimplePlot(Qwt.QwtPlot):
def __init__(self, *args):
Qwt.QwtPlot.__init__(self, *args)
# make a QwtPlot widget
self.setTitle('ReallySimpleDemo.py')
self.insertLegend(Qwt.QwtLegend(), Qwt.QwtPlot.RightLegend)
# set axis titles
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'x -->')
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'y -->')
# insert a few curves
cSin = Qwt.QwtPlotCurve('y = sin(x)')
cSin.setPen(qt.QPen(qt.Qt.red))
cSin.attach(self)
cCos = Qwt.QwtPlotCurve('y = cos(x)')
cCos.setPen(qt.QPen(qt.Qt.blue))
cCos.attach(self)
# make a Numeric array for the horizontal data
x = arange(0.0, 10.0, 0.1)
# initialize the data
cSin.setData(x, sin(x))
cCos.setData(x, cos(x))
# insert a horizontal marker at y = 0
mY = Qwt.QwtPlotMarker()
mY.setLabel(Qwt.QwtText('y = 0'))
mY.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
mY.setYValue(0.0)
mY.attach(self)
# insert a vertical marker at x = 2 pi
mX = Qwt.QwtPlotMarker()
mX.setLabel(Qwt.QwtText('x = 2 pi'))
mX.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
mX.setLineStyle(Qwt.QwtPlotMarker.VLine)
mX.setXValue(2*pi)
mX.attach(self)
# replot
self.replot()
# __init__()
# class Plot
def make():
demo = SimplePlot()
demo.resize(500, 300)
demo.show()
return demo
# make()
def main(args):
app = qt.QApplication(args)
demo = make()
app.setMainWidget(demo)
sys.exit(app.exec_loop())
# main()
# Admire
if __name__ == '__main__':
main(sys.argv)
# Local Variables: ***
# mode: python ***
# End: ***
|
PyQwt/PyQwt5
|
qt3examples/ReallySimpleDemo.py
|
Python
|
gpl-2.0
| 2,005 | 0.015461 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.util.dirutil import safe_mkdir_for
class ReproMixin(object):
""" Additional helper methods for use in Repro tests"""
def add_file(self, root, path, content):
"""Add a file with specified contents
:param str root: Root directory for path.
:param str path: Path relative to root.
:param str content: Content to write to file.
"""
fullpath = os.path.join(root, path)
safe_mkdir_for(fullpath)
with open(fullpath, 'w') as outfile:
outfile.write(content)
def assert_not_exists(self, root, path):
"""Assert a file at relpath doesn't exist
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:return: bool
"""
fullpath = os.path.join(root, path)
self.assertFalse(os.path.exists(fullpath))
def assert_file(self, root, path, expected_content=None):
""" Assert that a file exists with the content specified
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:param str expected_content: file contents.
"""
fullpath = os.path.join(root, path)
self.assertTrue(os.path.isfile(fullpath))
if expected_content:
with open(fullpath, 'r') as infile:
content = infile.read()
self.assertEqual(expected_content, content)
|
baroquebobcat/pants
|
tests/python/pants_test/init/repro_mixin.py
|
Python
|
apache-2.0
| 1,635 | 0.004893 |
import genxmlif
from genxmlif.xmlifODict import odict
xmlIf = genxmlif.chooseXmlIf(genxmlif.XMLIF_ELEMENTTREE)
xmlTree = xmlIf.createXmlTree(None, "testTree", {"rootAttr1":"RootAttr1"})
xmlRootNode = xmlTree.getRootNode()
myDict = odict( (("childTag1","123"), ("childTag2","123")) )
xmlRootNode.appendChild("childTag", myDict)
xmlRootNode.appendChild("childTag", {"childTag1":"123456", "childTag2":"123456"})
xmlRootNode.appendChild("childTag", {"childTag1":"123456789", "childTag3":"1234", "childTag2":"123456789"})
xmlRootNode.appendChild("childTag", {"childTag1":"1", "childTag2":"1"})
print xmlTree.printTree(prettyPrint=1)
print xmlTree
print xmlTree.getRootNode()
|
UgCS/vsm-cpp-sdk
|
tools/mavgen/lib/genxmlif/xmliftest.py
|
Python
|
bsd-3-clause
| 670 | 0.022388 |
from jsbuild.attrdict import AttrDict
from time import strftime
class Manifest(AttrDict):
def __init__(self,*args,**kwargs):
super(AttrDict, self).__init__(*args,**kwargs)
self._buffer_ = None
self._parent_ = None
if not self.__contains__('_dict_'):
self['_dict_'] = {}
self['_dict_']['timestamp'] = int(strftime("%Y%m%d%H%M"))
def __getitem__(self,name):
item = super(Manifest,self).__getitem__(name)
if isinstance(item,Manifest) and not item._parent_:
item._parent_ = self
elif isinstance(item,str):
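      # walk up to the root manifest and expand %(...)s placeholders in the
      # string against the root's _dict_ (e.g. %(timestamp)s)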
root = self
while root._parent_: root = root._parent_
item = item%root._dict_
return item
|
azer/jsbuild
|
jsbuild/manifest.py
|
Python
|
mit
| 673 | 0.028232 |
import sys
import unittest
sys.path.append('../../')
import lib.event
class TestEvents(unittest.TestCase):
def setUp(self):
TestEvents.successful = False
TestEvents.successful2 = False
def test_subscribe(self):
@lib.event.subscribe('test')
def subscribe_test():
TestEvents.successful = True
lib.event.call('test')
self.assertTrue(TestEvents.successful)
def test_subscribe_with_params(self):
@lib.event.subscribe('test2')
def subscribe_test(successful=False):
TestEvents.successful = successful
lib.event.call('test2', {'successful': True})
self.assertTrue(TestEvents.successful)
def test_subscribe_two_with_params(self):
@lib.event.subscribe('test3')
def subscribe_test(successful=False):
TestEvents.successful = successful
@lib.event.subscribe('test3')
def subscribe_test2(successful=False):
TestEvents.successful2 = successful
lib.event.call('test3', {'successful': True})
self.assertTrue(TestEvents.successful)
self.assertTrue(TestEvents.successful2)
if __name__ == '__main__':
unittest.main()
|
Javex/qllbot
|
tests/lib_tests/events.py
|
Python
|
bsd-2-clause
| 1,213 | 0.000824 |
# -*- coding: utf-8 -*-
# import sqlite3 as sqlite
import sys
import uuid
from pysqlcipher3 import dbapi2 as sqlite
def main():
print("***************** Welcome to OSS DataMaster-Rigster System *******************")
print("* *")
print("******************************************************************************")
conn = sqlite.connect('DataMasterSystem.db')
c = conn.cursor()
c.execute("PRAGMA key='data_master_system'") # 对加密的sqlite文件进行解密
try:
c.execute('create table data_master_system (data_master_name text, password text, unique_id text)')
except sqlite.OperationalError as e:
pass
unique_id = uuid.uuid1()
data_masters = c.execute("select * from data_master_system").fetchall()
if len(data_masters) != 0:
data_master_name = input("[*] Input your data master name:\n")
for col in data_masters:
if data_master_name.strip() == col[0]:
print("[!] Data Master Name has existed!")
print("******************************************************************************")
print("* *")
print("*********************** Data Master Rigster Is Failed! ***********************")
sys.exit(-1)
else:
data_master_name = input("[*] Input your data master name:\n")
password = input("[*] Input your password:\n")
repeat_password = input("[*] Input your password again:\n")
if password.strip() != repeat_password.strip():
print("[!] Password is not equal to RePassword!")
print("******************************************************************************")
print("* *")
print("*********************** Data Master Rigster Is Failed! ***********************")
sys.exit(-1)
c.execute('insert into data_master_system values ("{}", "{}", "{}")'.format(data_master_name, password, unique_id))
conn.commit()
c.close()
print("******************************************************************************")
print("* *")
print("********************* Data Master Rigster Is Successful! *********************")
if __name__ == '__main__':
main()
|
summychou/CSForOSS
|
CA/OSSQt_DataMasterRigster.py
|
Python
|
mit
| 2,535 | 0.008748 |
import sys
import pytest
import numpy as np
import xgboost as xgb
from xgboost.compat import PANDAS_INSTALLED
from hypothesis import given, strategies, assume, settings
if PANDAS_INSTALLED:
from hypothesis.extra.pandas import column, data_frames, range_indexes
else:
def noop(*args, **kwargs):
pass
column, data_frames, range_indexes = noop, noop, noop
sys.path.append("tests/python")
import testing as tm
from test_predict import run_threaded_predict # noqa
from test_predict import run_predict_leaf # noqa
rng = np.random.RandomState(1994)
shap_parameter_strategy = strategies.fixed_dictionaries({
'max_depth': strategies.integers(1, 11),
'max_leaves': strategies.integers(0, 256),
'num_parallel_tree': strategies.sampled_from([1, 10]),
}).filter(lambda x: x['max_depth'] > 0 or x['max_leaves'] > 0)
predict_parameter_strategy = strategies.fixed_dictionaries({
'max_depth': strategies.integers(1, 8),
'num_parallel_tree': strategies.sampled_from([1, 4]),
})
class TestGPUPredict:
def test_predict(self):
iterations = 10
np.random.seed(1)
test_num_rows = [10, 1000, 5000]
test_num_cols = [10, 50, 500]
# This test passes for tree_method=gpu_hist and tree_method=exact. but
# for `hist` and `approx` the floating point error accumulates faster
# and fails even tol is set to 1e-4. For `hist`, the mismatching rate
# with 5000 rows is 0.04.
for num_rows in test_num_rows:
for num_cols in test_num_cols:
dtrain = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dval = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dtest = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
watchlist = [(dtrain, 'train'), (dval, 'validation')]
res = {}
param = {
"objective": "binary:logistic",
"predictor": "gpu_predictor",
'eval_metric': 'logloss',
'tree_method': 'gpu_hist',
'max_depth': 1
}
bst = xgb.train(param, dtrain, iterations, evals=watchlist,
evals_result=res)
assert self.non_increasing(res["train"]["logloss"])
gpu_pred_train = bst.predict(dtrain, output_margin=True)
gpu_pred_test = bst.predict(dtest, output_margin=True)
gpu_pred_val = bst.predict(dval, output_margin=True)
param["predictor"] = "cpu_predictor"
bst_cpu = xgb.train(param, dtrain, iterations, evals=watchlist)
cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
rtol=1e-6)
def non_increasing(self, L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
# Test case for a bug where multiple batch predictions made on a
# test set produce incorrect results
@pytest.mark.skipif(**tm.no_sklearn())
def test_multi_predict(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
n = 1000
X, y = make_regression(n, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=123)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
params = {}
params["tree_method"] = "gpu_hist"
params['predictor'] = "gpu_predictor"
bst_gpu_predict = xgb.train(params, dtrain)
params['predictor'] = "cpu_predictor"
bst_cpu_predict = xgb.train(params, dtrain)
predict0 = bst_gpu_predict.predict(dtest)
predict1 = bst_gpu_predict.predict(dtest)
cpu_predict = bst_cpu_predict.predict(dtest)
assert np.allclose(predict0, predict1)
assert np.allclose(predict0, cpu_predict)
@pytest.mark.skipif(**tm.no_sklearn())
def test_sklearn(self):
m, n = 15000, 14
tr_size = 2500
X = np.random.rand(m, n)
y = 200 * np.matmul(X, np.arange(-3, -3 + n))
X_train, y_train = X[:tr_size, :], y[:tr_size]
X_test, y_test = X[tr_size:, :], y[tr_size:]
# First with cpu_predictor
params = {'tree_method': 'gpu_hist',
'predictor': 'cpu_predictor',
'n_jobs': -1,
'seed': 123}
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
cpu_train_score = m.score(X_train, y_train)
cpu_test_score = m.score(X_test, y_test)
# Now with gpu_predictor
params['predictor'] = 'gpu_predictor'
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
gpu_train_score = m.score(X_train, y_train)
gpu_test_score = m.score(X_test, y_test)
assert np.allclose(cpu_train_score, gpu_train_score)
assert np.allclose(cpu_test_score, gpu_test_score)
def run_inplace_base_margin(self, booster, dtrain, X, base_margin):
import cupy as cp
dtrain.set_info(base_margin=base_margin)
from_inplace = booster.inplace_predict(data=X, base_margin=base_margin)
from_dmatrix = booster.predict(dtrain)
cp.testing.assert_allclose(from_inplace, from_dmatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_inplace_predict_cupy(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
rows = 1000
cols = 10
missing = 11 # set to integer for testing
cp_rng = cp.random.RandomState(1994)
cp.random.set_random_state(cp_rng)
X = cp.random.randn(rows, cols)
missing_idx = [i for i in range(0, cols, 4)]
X[:, missing_idx] = missing # set to be missing
y = cp.random.randn(rows)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
test = xgb.DMatrix(X[:10, ...], missing=missing)
predt_from_array = booster.inplace_predict(X[:10, ...], missing=missing)
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_dense(x):
inplace_predt = booster.inplace_predict(x)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
return cp.all(copied_predt == inplace_predt)
# Don't do this on Windows, see issue #5793
if sys.platform.startswith("win"):
pytest.skip(
'Multi-threaded in-place prediction with cuPy is not working on Windows')
for i in range(10):
run_threaded_predict(X, rows, predict_dense)
base_margin = cp_rng.randn(rows)
self.run_inplace_base_margin(booster, dtrain, X, base_margin)
# Create a wide dataset
X = cp_rng.randn(100, 10000)
y = cp_rng.randn(100)
missing_idx = [i for i in range(0, X.shape[1], 16)]
X[:, missing_idx] = missing
reg = xgb.XGBRegressor(tree_method="gpu_hist", n_estimators=8, missing=missing)
reg.fit(X, y)
gpu_predt = reg.predict(X)
reg.set_params(predictor="cpu_predictor")
cpu_predt = reg.predict(X)
np.testing.assert_allclose(gpu_predt, cpu_predt, atol=1e-6)
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.skipif(**tm.no_cudf())
def test_inplace_predict_cudf(self):
import cupy as cp
import cudf
import pandas as pd
rows = 1000
cols = 10
rng = np.random.RandomState(1994)
cp.cuda.runtime.setDevice(0)
X = rng.randn(rows, cols)
X = pd.DataFrame(X)
y = rng.randn(rows)
X = cudf.from_pandas(X)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'},
dtrain, num_boost_round=10)
test = xgb.DMatrix(X)
predt_from_array = booster.inplace_predict(X)
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_df(x):
# column major array
inplace_predt = booster.inplace_predict(x.values)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
assert cp.all(copied_predt == inplace_predt)
inplace_predt = booster.inplace_predict(x)
return cp.all(copied_predt == inplace_predt)
for i in range(10):
run_threaded_predict(X, rows, predict_df)
base_margin = cudf.Series(rng.randn(rows))
self.run_inplace_base_margin(booster, dtrain, X, base_margin)
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None)
def test_shap(self, num_rounds, dataset, param):
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_contribs=True)
margin = bst.predict(test_dmat, output_margin=True)
assume(len(dataset.y) > 0)
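        # pred_contribs returns per-feature contributions plus a bias column;
        # summed over the last axis they reproduce the raw margin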
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-3, 1e-3)
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None, max_examples=20)
def test_shap_interactions(self, num_rounds, dataset, param):
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_interactions=True)
margin = bst.predict(test_dmat, output_margin=True)
assume(len(dataset.y) > 0)
assert np.allclose(np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)),
margin,
1e-3, 1e-3)
def test_shap_categorical(self):
X, y = tm.make_categorical(100, 20, 7, False)
Xy = xgb.DMatrix(X, y, enable_categorical=True)
booster = xgb.train({"tree_method": "gpu_hist"}, Xy, num_boost_round=10)
booster.set_param({"predictor": "gpu_predictor"})
shap = booster.predict(Xy, pred_contribs=True)
margin = booster.predict(Xy, output_margin=True)
np.testing.assert_allclose(
np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
)
booster.set_param({"predictor": "cpu_predictor"})
shap = booster.predict(Xy, pred_contribs=True)
margin = booster.predict(Xy, output_margin=True)
np.testing.assert_allclose(
np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
)
def test_predict_leaf_basic(self):
gpu_leaf = run_predict_leaf('gpu_predictor')
cpu_leaf = run_predict_leaf('cpu_predictor')
np.testing.assert_equal(gpu_leaf, cpu_leaf)
def run_predict_leaf_booster(self, param, num_rounds, dataset):
param = dataset.set_params(param)
m = dataset.get_dmat()
booster = xgb.train(param, dtrain=dataset.get_dmat(), num_boost_round=num_rounds)
booster.set_param({'predictor': 'cpu_predictor'})
cpu_leaf = booster.predict(m, pred_leaf=True)
booster.set_param({'predictor': 'gpu_predictor'})
gpu_leaf = booster.predict(m, pred_leaf=True)
np.testing.assert_equal(cpu_leaf, gpu_leaf)
@given(predict_parameter_strategy, tm.dataset_strategy)
@settings(deadline=None)
def test_predict_leaf_gbtree(self, param, dataset):
param['booster'] = 'gbtree'
param['tree_method'] = 'gpu_hist'
self.run_predict_leaf_booster(param, 10, dataset)
@given(predict_parameter_strategy, tm.dataset_strategy)
@settings(deadline=None)
def test_predict_leaf_dart(self, param, dataset):
param['booster'] = 'dart'
param['tree_method'] = 'gpu_hist'
self.run_predict_leaf_booster(param, 10, dataset)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
@given(df=data_frames([column('x0', elements=strategies.integers(min_value=0, max_value=3)),
column('x1', elements=strategies.integers(min_value=0, max_value=5))],
index=range_indexes(min_size=20, max_size=50)))
@settings(deadline=None)
def test_predict_categorical_split(self, df):
from sklearn.metrics import mean_squared_error
df = df.astype('category')
x0, x1 = df['x0'].to_numpy(), df['x1'].to_numpy()
y = (x0 * 10 - 20) + (x1 - 2)
dtrain = xgb.DMatrix(df, label=y, enable_categorical=True)
params = {
'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor',
'max_depth': 3, 'learning_rate': 1.0, 'base_score': 0.0, 'eval_metric': 'rmse'
}
eval_history = {}
bst = xgb.train(params, dtrain, num_boost_round=5, evals=[(dtrain, 'train')],
verbose_eval=False, evals_result=eval_history)
pred = bst.predict(dtrain)
rmse = mean_squared_error(y_true=y, y_pred=pred, squared=False)
np.testing.assert_almost_equal(rmse, eval_history['train']['rmse'][-1], decimal=5)
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.parametrize("n_classes", [2, 3])
def test_predict_dart(self, n_classes):
from sklearn.datasets import make_classification
import cupy as cp
n_samples = 1000
X_, y_ = make_classification(
n_samples=n_samples, n_informative=5, n_classes=n_classes
)
X, y = cp.array(X_), cp.array(y_)
Xy = xgb.DMatrix(X, y)
if n_classes == 2:
params = {
"tree_method": "gpu_hist",
"booster": "dart",
"rate_drop": 0.5,
"objective": "binary:logistic"
}
else:
params = {
"tree_method": "gpu_hist",
"booster": "dart",
"rate_drop": 0.5,
"objective": "multi:softprob",
"num_class": n_classes
}
booster = xgb.train(params, Xy, num_boost_round=32)
# predictor=auto
inplace = booster.inplace_predict(X)
copied = booster.predict(Xy)
cpu_inplace = booster.inplace_predict(X_)
booster.set_param({"predictor": "cpu_predictor"})
cpu_copied = booster.predict(Xy)
copied = cp.array(copied)
cp.testing.assert_allclose(cpu_inplace, copied, atol=1e-6)
cp.testing.assert_allclose(cpu_copied, copied, atol=1e-6)
cp.testing.assert_allclose(inplace, copied, atol=1e-6)
booster.set_param({"predictor": "gpu_predictor"})
inplace = booster.inplace_predict(X)
copied = booster.predict(Xy)
copied = cp.array(copied)
cp.testing.assert_allclose(inplace, copied, atol=1e-6)
@pytest.mark.skipif(**tm.no_cupy())
def test_dtypes(self):
import cupy as cp
rows = 1000
cols = 10
rng = cp.random.RandomState(1994)
orig = rng.randint(low=0, high=127, size=rows * cols).reshape(
rows, cols
)
y = rng.randint(low=0, high=127, size=rows)
dtrain = xgb.DMatrix(orig, label=y)
booster = xgb.train({"tree_method": "gpu_hist"}, dtrain)
predt_orig = booster.inplace_predict(orig)
# all primitive types in numpy
for dtype in [
cp.signedinteger,
cp.byte,
cp.short,
cp.intc,
cp.int_,
cp.longlong,
cp.unsignedinteger,
cp.ubyte,
cp.ushort,
cp.uintc,
cp.uint,
cp.ulonglong,
cp.floating,
cp.half,
cp.single,
cp.double,
]:
X = cp.array(orig, dtype=dtype)
predt = booster.inplace_predict(X)
cp.testing.assert_allclose(predt, predt_orig)
# boolean
orig = cp.random.binomial(1, 0.5, size=rows * cols).reshape(
rows, cols
)
predt_orig = booster.inplace_predict(orig)
for dtype in [cp.bool8, cp.bool_]:
X = cp.array(orig, dtype=dtype)
predt = booster.inplace_predict(X)
cp.testing.assert_allclose(predt, predt_orig)
# unsupported types
for dtype in [
cp.complex64,
cp.complex128,
]:
X = cp.array(orig, dtype=dtype)
with pytest.raises(ValueError):
booster.inplace_predict(X)
|
dmlc/xgboost
|
tests/python-gpu/test_gpu_prediction.py
|
Python
|
apache-2.0
| 17,716 | 0.000903 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cellulist documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs dir.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cellulist
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cellulist'
copyright = u'2015, Elliot Marsden'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cellulist.__version__
# The full version, including alpha/beta/rc tags.
release = cellulist.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cellulistdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cellulist.tex',
u'cellulist Documentation',
u'Elliot Marsden', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cellulist',
u'cellulist Documentation',
[u'Elliot Marsden'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cellulist',
u'cellulist Documentation',
u'Elliot Marsden',
'cellulist',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
eddiejessup/cellulist
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,421 | 0.005344 |
from pytest_factoryboy import register
from meinberlin.test.factories import kiezkasse
register(kiezkasse.ProposalFactory)
|
liqd/a4-meinberlin
|
tests/kiezkasse/conftest.py
|
Python
|
agpl-3.0
| 125 | 0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from wtforms.fields import StringField
from wtforms.validators import DataRequired
from wtforms_sqlalchemy.fields import QuerySelectField
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.tracks.models.groups import TrackGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import IndicoMarkdownField
class TrackForm(IndicoForm):
title = StringField(_('Title'), [DataRequired()])
code = StringField(_('Code'))
track_group = QuerySelectField(_('Track group'), default='', allow_blank=True, get_label='title',
description=_('Select a track group to which this track should belong'))
default_session = QuerySelectField(_('Default session'), default='', allow_blank=True, get_label='title',
description=_('Indico will preselect this session whenever an abstract is '
'accepted for the track'))
description = IndicoMarkdownField(_('Description'), editor=True)
def __init__(self, *args, **kwargs):
event = kwargs.pop('event')
super().__init__(*args, **kwargs)
self.default_session.query = Session.query.with_parent(event)
self.track_group.query = TrackGroup.query.with_parent(event)
class ProgramForm(IndicoForm):
program = IndicoMarkdownField(_('Program'), editor=True, mathjax=True)
@generated_data
def program_render_mode(self):
return RenderMode.markdown
class TrackGroupForm(IndicoForm):
title = StringField(_('Title'), [DataRequired()])
description = IndicoMarkdownField(_('Description'), editor=True)
|
indico/indico
|
indico/modules/events/tracks/forms.py
|
Python
|
mit
| 2,016 | 0.001984 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network_common import load_provider
from ansible.module_utils.nxos import nxos_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
if self._play_context.connection != 'local' and transport == 'cli':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
rc, out, err = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
else:
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = self._play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = self._play_context.connection_user
if provider.get('password') is None:
provider['password'] = self._play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
self._task.args['provider'] = provider
# make sure a transport value is set in args
self._task.args['transport'] = transport
result = super(ActionModule, self).run(tmp, task_vars)
return result
|
rmfitzpatrick/ansible
|
lib/ansible/plugins/action/nxos.py
|
Python
|
gpl-3.0
| 4,638 | 0.001725 |
import _plotly_utils.basevalidators
class TypesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="typesrc",
parent_name="scatterternary.marker.gradient",
**kwargs
):
super(TypesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatterternary/marker/gradient/_typesrc.py
|
Python
|
mit
| 454 | 0 |
#!/usr/bin/env python
import os.path
import sys
# Version file management scheme and graceful degradation for
# setuptools borrowed and adapted from GitPython.
try:
from setuptools import setup, find_packages
# Silence pyflakes
assert setup
assert find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info < (2, 6):
raise RuntimeError('Python versions < 2.6 are not supported.')
# Utility function to read the contents of short files.
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
VERSION = read(os.path.join('wal_e', 'VERSION')).strip()
install_requires = [
l for l in read('requirements.txt').split('\n')
if l and not l.startswith('#')]
if sys.version_info < (2, 7):
install_requires.append('argparse>=0.8')
setup(
name="wal-e",
version=VERSION,
packages=find_packages(),
install_requires=install_requires,
# metadata for upload to PyPI
author="The WAL-E Contributors",
author_email="wal-e@googlegroups.com",
maintainer="Daniel Farina",
maintainer_email="daniel@heroku.com",
description="Continuous Archiving for Postgres",
long_description=read('README.rst'),
classifiers=['Topic :: Database',
'Topic :: System :: Archiving',
'Topic :: System :: Recovery Tools'],
platforms=['any'],
license="BSD",
keywords=("postgres postgresql database backup archive archiving s3 aws "
"openstack swift wabs azure wal shipping"),
url="https://github.com/wal-e/wal-e",
# Include the VERSION file
package_data={'wal_e': ['VERSION']},
# install
entry_points={'console_scripts': ['wal-e=wal_e.cmd:main']})
|
heroku/wal-e
|
setup.py
|
Python
|
bsd-3-clause
| 1,831 | 0.001092 |