| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---
from cargo.base import CargoBase, lowercase, make_id_dict
class Image(CargoBase):
"""Python wrapper class encapsulating the metadata for a Docker Image"""
def __init__(self, *args, **kw):
super(Image, self).__init__(*args, **kw)
@property
    def config(self):
image = make_id_dict(self._dock._images).get(self._config.get('id'))
if image:
self._config = lowercase(image)
return self._config
@property
def image(self):
return self.config.get('image')
@property
def size(self):
return self.config.get('size')
@property
def vsize(self):
return self.config.get('virtualsize')
@property
def image_id(self):
return self.config.get('id')
@property
def repository(self):
return self.config.get('repository')
@property
def tag(self):
return self.config.get('tag')
def __repr__(self):
if self.repository:
return '<Image [%s:%s]>' % (self.repository, self.image_id[:12])
return '<Image [%s]>' % (self.image_id[:12],)
| mvanveen/cargo | cargo/image.py | Python | mit | 1,022 | 0.019569 |
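A minimal, self-contained sketch of the id-keyed lookup the `config` property above performs. `lowercase` and `make_id_dict` live in `cargo.base` and are not shown here, so the stub behaviour below (index records by id, lower-case the keys) is an assumption for illustration only.

def lowercase(record):
    # Return a copy of the record with lower-cased keys.
    return {key.lower(): value for key, value in record.items()}

def make_id_dict(records):
    # Index a list of Docker image records by their id field.
    return {rec.get("Id") or rec.get("id"): rec for rec in records}

raw_images = [{"Id": "deadbeef1234", "Repository": "ubuntu", "Tag": "latest", "Size": 188}]
config = lowercase(make_id_dict(raw_images)["deadbeef1234"])
print(config["repository"], config["tag"])  # -> ubuntu latest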
"""
Constructs a planner that is good for being kinda like a car-boat thing!
"""
from __future__ import division
import numpy as np
import numpy.linalg as npl
from params import *
import lqrrt
################################################# DYNAMICS
magic_rudder = 6000
def dynamics(x, u, dt):
"""
Returns next state given last state x, wrench u, and timestep dt.
"""
# Rotation matrix (orientation, converts body to world)
R = np.array([
[np.cos(x[2]), -np.sin(x[2]), 0],
[np.sin(x[2]), np.cos(x[2]), 0],
[ 0, 0, 1]
])
# Construct drag coefficients based on our motion signs
D = np.copy(D_neg)
for i, v in enumerate(x[3:]):
if v >= 0:
D[i] = D_pos[i]
# Heading controller trying to keep us car-like
vw = R[:2, :2].dot(x[3:5])
ang = np.arctan2(vw[1], vw[0])
c = np.cos(x[2])
s = np.sin(x[2])
cg = np.cos(ang)
sg = np.sin(ang)
u[2] = magic_rudder*np.arctan2(sg*c - cg*s, cg*c + sg*s)
# Actuator saturation
u = B.dot(np.clip(invB.dot(u), -thrust_max, thrust_max))
# M*vdot + D*v = u and pdot = R*v
xdot = np.concatenate((R.dot(x[3:]), invM*(u - D*x[3:])))
# First-order integrate
xnext = x + xdot*dt
# Impose not driving backwards
if xnext[3] < 0:
xnext[3] = abs(x[3])
# # Impose not turning in place
# xnext[5] = np.clip(np.abs(xnext[3]/velmax_pos[0]), 0, 1) * xnext[5]
return xnext
################################################# POLICY
kp = np.diag([150, 150, 0])
kd = np.diag([150, 5, 0])
S = np.diag([1, 1, 1, 0, 0, 0])
def lqr(x, u):
"""
Returns cost-to-go matrix S and policy matrix K given local state x and effort u.
"""
R = np.array([
[np.cos(x[2]), -np.sin(x[2]), 0],
[np.sin(x[2]), np.cos(x[2]), 0],
[ 0, 0, 1]
])
K = np.hstack((kp.dot(R.T), kd))
return (S, K)
################################################# HEURISTICS
goal_buffer = [0.5*free_radius, 0.5*free_radius, np.inf, np.inf, np.inf, np.inf]
error_tol = np.copy(goal_buffer)/10
def gen_ss(seed, goal, buff=[ss_start]*4):
"""
Returns a sample space given a seed state, goal state, and buffer.
"""
return [(min([seed[0], goal[0]]) - buff[0], max([seed[0], goal[0]]) + buff[1]),
(min([seed[1], goal[1]]) - buff[2], max([seed[1], goal[1]]) + buff[3]),
(-np.pi, np.pi),
(0.9*velmax_pos[0], velmax_pos[0]),
(-abs(velmax_neg[1]), velmax_pos[1]),
(-abs(velmax_neg[2]), velmax_pos[2])]
################################################# MAIN ATTRIBUTES
constraints = lqrrt.Constraints(nstates=nstates, ncontrols=ncontrols,
goal_buffer=goal_buffer, is_feasible=unset)
planner = lqrrt.Planner(dynamics, lqr, constraints,
horizon=horizon, dt=dt, FPR=FPR,
error_tol=error_tol, erf=unset,
min_time=basic_duration, max_time=basic_duration, max_nodes=max_nodes,
sys_time=unset, printing=False)
| jnez71/lqRRT | demos/lqrrt_ros/behaviors/car.py | Python | mit | 3,231 | 0.005262 |
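A small self-contained check of the body-to-world rotation used by `dynamics` and `lqr` above. The full planner needs the `params` and `lqrrt` modules, which are not included here; the state layout [x, y, yaw, surge, sway, yaw-rate] is inferred from how `x[2]` and `x[3:]` are used.

import numpy as np

def body_to_world(yaw, v_body):
    # Rotate a planar body-frame velocity [surge, sway, yaw-rate] into the
    # world frame, exactly as the R matrix inside dynamics() does.
    R = np.array([
        [np.cos(yaw), -np.sin(yaw), 0],
        [np.sin(yaw),  np.cos(yaw), 0],
        [0,            0,           1],
    ])
    return R.dot(v_body)

# A boat pointing along +y (yaw = pi/2) and surging forward at 2 m/s moves in
# the world +y direction:
print(body_to_world(np.pi / 2, np.array([2.0, 0.0, 0.0])))  # ~[0, 2, 0]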
import copy
import time
from nive.utils.dataPool2.mysql.tests import test_MySql
try:
from nive.utils.dataPool2.mysql.mySqlPool import *
except ImportError:
pass
from . import test_db
from nive.utils.dataPool2.sqlite.sqlite3Pool import *
mode = "mysql"
printed = [""]
def print_(*kw):
if type(kw)!=type(""):
v = ""
for a in kw:
v += " "+str(a)
else:
v = kw
if v == "":
print(".",)
printed.append("")
else:
printed[-1] += v
def getConnection():
if mode == "mysql":
c = MySqlConn(test_MySql.conn, 0)
print_( "MySQL -")
elif mode == "mysqlinno":
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
c = MySqlConn(c, 0)
print_( "MySQL InnoDB -")
else:
c = Sqlite3Conn(test_db.conn, 0)
print_( "Sqlite 3 -")
return c
def getPool():
if mode == "mysql":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print_( "MySQL -")
elif mode == "mysqlinno":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
pool.CreateConnection(c)
print_( "MySQL InnoDB -")
else:
pool = Sqlite3(test_db.conf)
pool.SetStdMeta(copy.copy(test_db.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_db.struct)
pool.CreateConnection(test_db.conn)
print_( "Sqlite 3 -")
return pool
def empty():
#if mode == "mysql":
# test_MySql.emptypool()
#elif mode == "mysqlinno":
# test_MySql.emptypool()
#else:
# t_db.emptypool()
pass
def connects(n):
c = getConnection()
print_( "Connection: ")
t = time.time()
for i in range(0,n):
c.connect()
c.Close()
t2 = time.time()
print_( n, " connects in ", t2-t, "secs. ", (t2-t)/n, " per connect")
print_()
def cursors(n):
c = getConnection()
c.connect()
print_( "Cursor: ")
t = time.time()
for i in range(0,n):
cu = c.cursor()
cu.close()
t2 = time.time()
c.Close()
print_( n, " cursors in ", t2-t, "secs. ", (t2-t)/n, " per cursor")
print_()
def createsql(n):
pool = getPool()
print_( "Create SQL: ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "", "fnumber": 3},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "<>", "fnumber": ">"},
start=1,
max=123)
t2 = time.time()
pool.Close()
print_( n, " sql statements in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery1(n, start):
pool = getPool()
print_( "SQL Query data+meta (join no index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "123", "fnumber": i+start},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "LIKE", "fnumber": "!="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery2(n, start):
pool = getPool()
print_( "SQL Query data+meta=id (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"id": i+start},
sort = "title",
ascending = 0,
dataTable = "data1",
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery3(n, start):
pool = getPool()
print_( "SQL Query meta=id (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery4(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1 (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1"},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery5(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1+data.funit (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1", "funit": 35},
sort = "id",
ascending = 0,
dataTable = "data1",
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery6(n):
pool = getPool()
print_( "SQL Query filename (text index): ")
t = time.time()
for i in range(0,n):
files = pool.SearchFilename("file1xxx.txt")
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def createentries(n):
pool = getPool()
print_( "Create entries (nodb): ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def checkentries(n):
pool = getPool()
print_( "Create entries (nodb) and check exists: ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
e.Exists()
t2 = time.time()
pool.Close()
print_( n, " checks in ", t2-t, "secs. ", (t2-t)/n, " per check")
print_()
def createentries2(n):
pool = getPool()
print_( "Create entries (nodata): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
#e.data.update(data1_1)
#e.meta.update(meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def createentries3(n):
pool = getPool()
print_( "Create entries (data+meta): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def createentries4(n):
pool = getPool()
print_( "Create entries (data+meta+file): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.CommitFile("file1", {"file":test_MySql.file1_1, "filename": "file1.txt"})
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def getentries1(n, start):
pool = getPool()
print_( "Get entries (all): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries2(n, start):
pool = getPool()
print_( "Get entries (all+file): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries5(n, start):
pool = getPool()
print_( "Get entries (all+filestream): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
#f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries4(n, start):
pool = getPool()
print_( "Get entries (meta): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="meta")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch1(n, start):
pool = getPool()
print_( "Get batch (no preload): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="skip")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch2(n, start):
pool = getPool()
print_( "Get batch (meta): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="meta")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch3(n, start):
pool = getPool()
print_( "Get batch (all): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="all")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries(n, start):
pool = getPool()
print_( "Delete entries (meta+data): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries2(n, start):
pool = getPool()
print_( "Delete entries (meta+data+file): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def report(modes, printed):
rep=[]
    c = len(printed) // len(modes)
for n in range(0, c):
p = 0
for m in modes:
rep.append(printed[p*c+n])
p+=1
print()
print()
i=0
for p in rep:
print(p)
i+=1
if i==len(modes):
print()
i=0
def run(modes):
global mode , printed
n = 1000
printed = [""]
for m in modes:
mode = m
print()
        print(mode, end=" ")
empty()
connects(n)
cursors(n)
createsql(n)
createentries(n)
checkentries(n)
createentries2(n)
id = createentries3(n)
id2 = createentries4(n)
getentries1(n, id2)
getentries2(n, id2)
getentries5(n, id2)
getentries4(n, id2)
getbatch1(n, id2)
getbatch2(n, id2)
getbatch3(n, id2)
sqlquery1(n, id2)
sqlquery2(n, id)
sqlquery3(n, id2)
sqlquery4(n, id)
sqlquery5(n, id2)
sqlquery6(n)
delentries(n, id)
delentries2(n, id2)
report(modes, printed)
if __name__ == '__main__':
#run(("sqlite3",))
run(("sqlite3","mysql","mysqlinno"))
| nive/nive | nive/utils/dataPool2/tests/performance_test.py | Python | gpl-3.0 | 12,761 | 0.014262 |
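Every benchmark above follows the same shape: open a pool or connection, loop n times over one operation, and print the total and per-operation wall time. A generic sketch of that timing harness (not part of the original module) for reference:

import time

def time_calls(label, fn, n=1000):
    # Run fn() n times and report total and per-call wall time, mirroring the
    # pattern used by connects(), cursors(), createsql() and friends.
    start = time.time()
    for _ in range(n):
        fn()
    elapsed = time.time() - start
    print(label, n, "calls in", elapsed, "secs.", elapsed / n, "per call")

time_calls("noop:", lambda: None)  # trivial workload as a usage example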
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
__doc__="""Use OpenDocument to generate your documents."""
import zipfile, time, sys, mimetypes, copy
from cStringIO import StringIO
from namespaces import *
import manifest, meta
from office import *
import element
from attrconverters import make_NCName
from xml.sax.xmlreader import InputSource
from odfmanifest import manifestlist
__version__= TOOLSVERSION
_XMLPROLOGUE = u"<?xml version='1.0' encoding='UTF-8'?>\n"
UNIXPERMS = 0100644 << 16L # -rw-r--r--
IS_FILENAME = 0
IS_IMAGE = 1
# We need at least Python 2.2
assert sys.version_info[0]>=2 and sys.version_info[1] >= 2
#sys.setrecursionlimit(100)
#The recursion limit is set conservative so mistakes like
# s=content() s.addElement(s) won't eat up too much processor time.
odmimetypes = {
'application/vnd.oasis.opendocument.text': '.odt',
'application/vnd.oasis.opendocument.text-template': '.ott',
'application/vnd.oasis.opendocument.graphics': '.odg',
'application/vnd.oasis.opendocument.graphics-template': '.otg',
'application/vnd.oasis.opendocument.presentation': '.odp',
'application/vnd.oasis.opendocument.presentation-template': '.otp',
'application/vnd.oasis.opendocument.spreadsheet': '.ods',
'application/vnd.oasis.opendocument.spreadsheet-template': '.ots',
'application/vnd.oasis.opendocument.chart': '.odc',
'application/vnd.oasis.opendocument.chart-template': '.otc',
'application/vnd.oasis.opendocument.image': '.odi',
'application/vnd.oasis.opendocument.image-template': '.oti',
'application/vnd.oasis.opendocument.formula': '.odf',
'application/vnd.oasis.opendocument.formula-template': '.otf',
'application/vnd.oasis.opendocument.text-master': '.odm',
'application/vnd.oasis.opendocument.text-web': '.oth',
}
class OpaqueObject:
def __init__(self, filename, mediatype, content=None):
self.mediatype = mediatype
self.filename = filename
self.content = content
class OpenDocument:
""" A class to hold the content of an OpenDocument document
Use the xml method to write the XML
source to the screen or to a file
d = OpenDocument(mimetype)
fd.write(d.xml())
"""
thumbnail = None
def __init__(self, mimetype, add_generator=True):
self.mimetype = mimetype
self.childobjects = []
self._extra = []
self.folder = "" # Always empty for toplevel documents
self.topnode = Document(mimetype=self.mimetype)
self.topnode.ownerDocument = self
self.clear_caches()
self.Pictures = {}
self.meta = Meta()
self.topnode.addElement(self.meta)
if add_generator:
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
self.scripts = Scripts()
self.topnode.addElement(self.scripts)
self.fontfacedecls = FontFaceDecls()
self.topnode.addElement(self.fontfacedecls)
self.settings = Settings()
self.topnode.addElement(self.settings)
self.styles = Styles()
self.topnode.addElement(self.styles)
self.automaticstyles = AutomaticStyles()
self.topnode.addElement(self.automaticstyles)
self.masterstyles = MasterStyles()
self.topnode.addElement(self.masterstyles)
self.body = Body()
self.topnode.addElement(self.body)
def rebuild_caches(self, node=None):
if node is None: node = self.topnode
self.build_caches(node)
for e in node.childNodes:
if e.nodeType == element.Node.ELEMENT_NODE:
self.rebuild_caches(e)
def clear_caches(self):
self.element_dict = {}
self._styles_dict = {}
self._styles_ooo_fix = {}
def build_caches(self, element):
""" Called from element.py
"""
if not self.element_dict.has_key(element.qname):
self.element_dict[element.qname] = []
self.element_dict[element.qname].append(element)
if element.qname == (STYLENS, u'style'):
self.__register_stylename(element) # Add to style dictionary
styleref = element.getAttrNS(TEXTNS,u'style-name')
if styleref is not None and self._styles_ooo_fix.has_key(styleref):
element.setAttrNS(TEXTNS,u'style-name', self._styles_ooo_fix[styleref])
def __register_stylename(self, element):
''' Register a style. But there are three style dictionaries:
office:styles, office:automatic-styles and office:master-styles
Chapter 14
'''
name = element.getAttrNS(STYLENS, u'name')
if name is None:
return
if element.parentNode.qname in ((OFFICENS,u'styles'), (OFFICENS,u'automatic-styles')):
if self._styles_dict.has_key(name):
newname = 'M'+name # Rename style
self._styles_ooo_fix[name] = newname
# From here on all references to the old name will refer to the new one
name = newname
element.setAttrNS(STYLENS, u'name', name)
self._styles_dict[name] = element
def toXml(self, filename=''):
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.body.toXml(0, xml)
if not filename:
return xml.getvalue()
else:
f=file(filename,'w')
f.write(xml.getvalue())
f.close()
def xml(self):
""" Generates the full document as an XML file
Always written as a bytestream in UTF-8 encoding
"""
self.__replaceGenerator()
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.topnode.toXml(0, xml)
return xml.getvalue()
def contentxml(self):
""" Generates the content.xml file
Always written as a bytestream in UTF-8 encoding
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
x = DocumentContent()
x.write_open_tag(0, xml)
if self.scripts.hasChildNodes():
self.scripts.toXml(1, xml)
if self.fontfacedecls.hasChildNodes():
self.fontfacedecls.toXml(1, xml)
a = AutomaticStyles()
stylelist = self._used_auto_styles([self.styles, self.automaticstyles, self.body])
if len(stylelist) > 0:
a.write_open_tag(1, xml)
for s in stylelist:
s.toXml(2, xml)
a.write_close_tag(1, xml)
else:
a.toXml(1, xml)
self.body.toXml(1, xml)
x.write_close_tag(0, xml)
return xml.getvalue()
def __manifestxml(self):
""" Generates the manifest.xml file
        The self.manifest isn't available unless the document is being saved
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.manifest.toXml(0,xml)
return xml.getvalue()
def metaxml(self):
""" Generates the meta.xml file """
self.__replaceGenerator()
x = DocumentMeta()
x.addElement(self.meta)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
def settingsxml(self):
""" Generates the settings.xml file """
x = DocumentSettings()
x.addElement(self.settings)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
def _parseoneelement(self, top, stylenamelist):
""" Finds references to style objects in master-styles
and add the style name to the style list if not already there.
Recursive
"""
for e in top.childNodes:
if e.nodeType == element.Node.ELEMENT_NODE:
for styleref in (
(CHARTNS,u'style-name'),
(DRAWNS,u'style-name'),
(DRAWNS,u'text-style-name'),
(PRESENTATIONNS,u'style-name'),
(STYLENS,u'data-style-name'),
(STYLENS,u'list-style-name'),
(STYLENS,u'page-layout-name'),
(STYLENS,u'style-name'),
(TABLENS,u'default-cell-style-name'),
(TABLENS,u'style-name'),
(TEXTNS,u'style-name') ):
if e.getAttrNS(styleref[0],styleref[1]):
stylename = e.getAttrNS(styleref[0],styleref[1])
if stylename not in stylenamelist:
stylenamelist.append(stylename)
stylenamelist = self._parseoneelement(e, stylenamelist)
return stylenamelist
def _used_auto_styles(self, segments):
""" Loop through the masterstyles elements, and find the automatic
styles that are used. These will be added to the automatic-styles
element in styles.xml
"""
stylenamelist = []
for top in segments:
stylenamelist = self._parseoneelement(top, stylenamelist)
stylelist = []
for e in self.automaticstyles.childNodes:
if e.getAttrNS(STYLENS,u'name') in stylenamelist:
stylelist.append(e)
return stylelist
def stylesxml(self):
""" Generates the styles.xml file """
xml=StringIO()
xml.write(_XMLPROLOGUE)
x = DocumentStyles()
x.write_open_tag(0, xml)
if self.fontfacedecls.hasChildNodes():
self.fontfacedecls.toXml(1, xml)
self.styles.toXml(1, xml)
a = AutomaticStyles()
a.write_open_tag(1, xml)
for s in self._used_auto_styles([self.masterstyles]):
s.toXml(2, xml)
a.write_close_tag(1, xml)
if self.masterstyles.hasChildNodes():
self.masterstyles.toXml(1, xml)
x.write_close_tag(0, xml)
return xml.getvalue()
def addPicture(self, filename, mediatype=None, content=None):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'
If passed a file ptr, mediatype must be set
"""
if content is None:
if mediatype is None:
mediatype, encoding = mimetypes.guess_type(filename)
if mediatype is None:
mediatype = ''
try: ext = filename[filename.rindex('.'):]
                except ValueError: ext=''
else:
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_FILENAME, filename, mediatype)
else:
manifestfn = filename
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
return manifestfn
def addPictureFromFile(self, filename, mediatype=None):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'.
If mediatype is not given, it will be guessed from the filename
extension.
"""
if mediatype is None:
mediatype, encoding = mimetypes.guess_type(filename)
if mediatype is None:
mediatype = ''
try: ext = filename[filename.rindex('.'):]
except ValueError: ext=''
else:
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_FILENAME, filename, mediatype)
return manifestfn
def addPictureFromString(self, content, mediatype):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'. The content variable
is a string that contains the binary image data. The mediatype
indicates the image format.
"""
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
return manifestfn
def addThumbnail(self, filecontent=None):
""" Add a fixed thumbnail
The thumbnail in the library is big, so this is pretty useless.
"""
if filecontent is None:
import thumbnail
self.thumbnail = thumbnail.thumbnail()
else:
self.thumbnail = filecontent
def addObject(self, document, objectname=None):
""" Adds an object (subdocument). The object must be an OpenDocument class
The return value will be the folder in the zipfile the object is stored in
"""
self.childobjects.append(document)
if objectname is None:
document.folder = "%s/Object %d" % (self.folder, len(self.childobjects))
else:
document.folder = objectname
return ".%s" % document.folder
def _savePictures(self, object, folder):
hasPictures = False
for arcname, picturerec in object.Pictures.items():
what_it_is, fileobj, mediatype = picturerec
self.manifest.addElement(manifest.FileEntry(fullpath="%s%s" % ( folder ,arcname), mediatype=mediatype))
hasPictures = True
if what_it_is == IS_FILENAME:
self._z.write(fileobj, arcname, zipfile.ZIP_STORED)
else:
zi = zipfile.ZipInfo(str(arcname), self._now)
zi.compress_type = zipfile.ZIP_STORED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, fileobj)
# According to section 17.7.3 in ODF 1.1, the pictures folder should not have a manifest entry
# if hasPictures:
# self.manifest.addElement(manifest.FileEntry(fullpath="%sPictures/" % folder, mediatype=""))
# Look in subobjects
subobjectnum = 1
for subobject in object.childobjects:
self._savePictures(subobject,'%sObject %d/' % (folder, subobjectnum))
subobjectnum += 1
def __replaceGenerator(self):
""" Section 3.1.1: The application MUST NOT export the original identifier
belonging to the application that created the document.
"""
for m in self.meta.childNodes[:]:
if m.qname == (METANS, u'generator'):
self.meta.removeChild(m)
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
def save(self, outputfile, addsuffix=False):
""" Save the document under the filename.
If the filename is '-' then save to stdout
"""
if outputfile == '-':
outputfp = zipfile.ZipFile(sys.stdout,"w")
else:
if addsuffix:
outputfile = outputfile + odmimetypes.get(self.mimetype,'.xxx')
outputfp = zipfile.ZipFile(outputfile, "w")
self.__zipwrite(outputfp)
outputfp.close()
def write(self, outputfp):
""" User API to write the ODF file to an open file descriptor
Writes the ZIP format
"""
zipoutputfp = zipfile.ZipFile(outputfp,"w")
self.__zipwrite(zipoutputfp)
def __zipwrite(self, outputfp):
""" Write the document to an open file pointer
This is where the real work is done
"""
self._z = outputfp
self._now = time.localtime()[:6]
self.manifest = manifest.Manifest()
# Write mimetype
zi = zipfile.ZipInfo('mimetype', self._now)
zi.compress_type = zipfile.ZIP_STORED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.mimetype)
self._saveXmlObjects(self,"")
# Write pictures
self._savePictures(self,"")
# Write the thumbnail
if self.thumbnail is not None:
self.manifest.addElement(manifest.FileEntry(fullpath="Thumbnails/", mediatype=''))
self.manifest.addElement(manifest.FileEntry(fullpath="Thumbnails/thumbnail.png", mediatype=''))
zi = zipfile.ZipInfo("Thumbnails/thumbnail.png", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.thumbnail)
# Write any extra files
for op in self._extra:
if op.filename == "META-INF/documentsignatures.xml": continue # Don't save signatures
self.manifest.addElement(manifest.FileEntry(fullpath=op.filename, mediatype=op.mediatype))
zi = zipfile.ZipInfo(op.filename.encode('utf-8'), self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
if op.content is not None:
self._z.writestr(zi, op.content)
# Write manifest
zi = zipfile.ZipInfo("META-INF/manifest.xml", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.__manifestxml() )
del self._z
del self._now
del self.manifest
def _saveXmlObjects(self, object, folder):
if self == object:
self.manifest.addElement(manifest.FileEntry(fullpath="/", mediatype=object.mimetype))
else:
self.manifest.addElement(manifest.FileEntry(fullpath=folder, mediatype=object.mimetype))
# Write styles
self.manifest.addElement(manifest.FileEntry(fullpath="%sstyles.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%sstyles.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.stylesxml() )
# Write content
self.manifest.addElement(manifest.FileEntry(fullpath="%scontent.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%scontent.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.contentxml() )
# Write settings
if object.settings.hasChildNodes():
self.manifest.addElement(manifest.FileEntry(fullpath="%ssettings.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%ssettings.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.settingsxml() )
# Write meta
if self == object:
self.manifest.addElement(manifest.FileEntry(fullpath="meta.xml", mediatype="text/xml"))
zi = zipfile.ZipInfo("meta.xml", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.metaxml() )
# Write subobjects
subobjectnum = 1
for subobject in object.childobjects:
self._saveXmlObjects(subobject, '%sObject %d/' % (folder, subobjectnum))
subobjectnum += 1
# Document's DOM methods
def createElement(self, element):
""" Inconvenient interface to create an element, but follows XML-DOM.
Does not allow attributes as argument, therefore can't check grammar.
"""
return element(check_grammar=False)
def createTextNode(self, data):
""" Method to create a text node """
return element.Text(data)
def createCDATASection(self, data):
""" Method to create a CDATA section """
        return element.CDATASection(data)
def getMediaType(self):
""" Returns the media type """
return self.mimetype
def getStyleByName(self, name):
""" Finds a style object based on the name """
ncname = make_NCName(name)
if self._styles_dict == {}:
self.rebuild_caches()
return self._styles_dict.get(ncname, None)
def getElementsByType(self, element):
""" Gets elements based on the type, which is function from text.py, draw.py etc. """
obj = element(check_grammar=False)
if self.element_dict == {}:
self.rebuild_caches()
return self.element_dict.get(obj.qname, [])
# Convenience functions
def OpenDocumentChart():
""" Creates a chart document """
doc = OpenDocument('application/vnd.oasis.opendocument.chart')
doc.chart = Chart()
doc.body.addElement(doc.chart)
return doc
def OpenDocumentDrawing():
""" Creates a drawing document """
doc = OpenDocument('application/vnd.oasis.opendocument.graphics')
doc.drawing = Drawing()
doc.body.addElement(doc.drawing)
return doc
def OpenDocumentImage():
""" Creates an image document """
doc = OpenDocument('application/vnd.oasis.opendocument.image')
doc.image = Image()
doc.body.addElement(doc.image)
return doc
def OpenDocumentPresentation():
""" Creates a presentation document """
doc = OpenDocument('application/vnd.oasis.opendocument.presentation')
doc.presentation = Presentation()
doc.body.addElement(doc.presentation)
return doc
def OpenDocumentSpreadsheet():
""" Creates a spreadsheet document """
doc = OpenDocument('application/vnd.oasis.opendocument.spreadsheet')
doc.spreadsheet = Spreadsheet()
doc.body.addElement(doc.spreadsheet)
return doc
def OpenDocumentText():
""" Creates a text document """
doc = OpenDocument('application/vnd.oasis.opendocument.text')
doc.text = Text()
doc.body.addElement(doc.text)
return doc
def OpenDocumentTextMaster():
""" Creates a text master document """
doc = OpenDocument('application/vnd.oasis.opendocument.text-master')
doc.text = Text()
doc.body.addElement(doc.text)
return doc
def __loadxmlparts(z, manifest, doc, objectpath):
from load import LoadParser
from xml.sax import make_parser, handler
for xmlfile in (objectpath+'settings.xml', objectpath+'meta.xml', objectpath+'content.xml', objectpath+'styles.xml'):
if not manifest.has_key(xmlfile):
continue
try:
xmlpart = z.read(xmlfile)
doc._parsing = xmlfile
parser = make_parser()
parser.setFeature(handler.feature_namespaces, 1)
parser.setContentHandler(LoadParser(doc))
parser.setErrorHandler(handler.ErrorHandler())
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(xmlpart))
parser.setFeature(handler.feature_external_ges, False) # Changed by Kovid to ignore external DTDs
parser.parse(inpsrc)
del doc._parsing
except KeyError, v: pass
def load(odffile):
""" Load an ODF file into memory
Returns a reference to the structure
"""
z = zipfile.ZipFile(odffile)
try:
mimetype = z.read('mimetype')
except KeyError: # Added by Kovid to handle malformed odt files
mimetype = 'application/vnd.oasis.opendocument.text'
doc = OpenDocument(mimetype, add_generator=False)
    # Look in the manifest to see which of the four XML parts are present
manifestpart = z.read('META-INF/manifest.xml')
manifest = manifestlist(manifestpart)
__loadxmlparts(z, manifest, doc, '')
for mentry,mvalue in manifest.items():
if mentry[:9] == "Pictures/" and len(mentry) > 9:
doc.addPicture(mvalue['full-path'], mvalue['media-type'], z.read(mentry))
elif mentry == "Thumbnails/thumbnail.png":
doc.addThumbnail(z.read(mentry))
elif mentry in ('settings.xml', 'meta.xml', 'content.xml', 'styles.xml'):
pass
# Load subobjects into structure
elif mentry[:7] == "Object " and len(mentry) < 11 and mentry[-1] == "/":
subdoc = OpenDocument(mvalue['media-type'], add_generator=False)
doc.addObject(subdoc, "/" + mentry[:-1])
__loadxmlparts(z, manifest, subdoc, mentry)
elif mentry[:7] == "Object ":
pass # Don't load subobjects as opaque objects
else:
if mvalue['full-path'][-1] == '/':
doc._extra.append(OpaqueObject(mvalue['full-path'], mvalue['media-type'], None))
else:
doc._extra.append(OpaqueObject(mvalue['full-path'], mvalue['media-type'], z.read(mentry)))
# Add the SUN junk here to the struct somewhere
# It is cached data, so it can be out-of-date
z.close()
b = doc.getElementsByType(Body)
if mimetype[:39] == 'application/vnd.oasis.opendocument.text':
doc.text = b[0].firstChild
elif mimetype[:43] == 'application/vnd.oasis.opendocument.graphics':
doc.graphics = b[0].firstChild
elif mimetype[:47] == 'application/vnd.oasis.opendocument.presentation':
doc.presentation = b[0].firstChild
elif mimetype[:46] == 'application/vnd.oasis.opendocument.spreadsheet':
doc.spreadsheet = b[0].firstChild
elif mimetype[:40] == 'application/vnd.oasis.opendocument.chart':
doc.chart = b[0].firstChild
elif mimetype[:40] == 'application/vnd.oasis.opendocument.image':
doc.image = b[0].firstChild
elif mimetype[:42] == 'application/vnd.oasis.opendocument.formula':
doc.formula = b[0].firstChild
return doc
# vim: set expandtab sw=4 :
| ashang/calibre | src/odf/opendocument.py | Python | gpl-3.0 | 26,274 | 0.004948 |
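A minimal usage sketch for the OpenDocument module above, assuming the odfpy-style package layout it ships in (the `odf.opendocument` / `odf.text` import paths are an assumption; inside calibre the package prefix may differ). It only exercises functions defined above plus a paragraph element from the companion text module.

from odf.opendocument import OpenDocumentText
from odf.text import P

doc = OpenDocumentText()
doc.text.addElement(P(text="Hello OpenDocument"))
doc.save("hello", addsuffix=True)  # suffix comes from odmimetypes, so this writes hello.odt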
import json
from DIRAC.Core.Base.Client import Client
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder
class FTS3Client(Client):
""" Client code to the FTS3 service
"""
def __init__(self, url=None, **kwargs):
""" Constructor function.
"""
Client.__init__(self, **kwargs)
self.setServer('DataManagement/FTS3Manager')
if url:
self.setServer(url)
def persistOperation(self, opObj, **kwargs):
""" Persist (insert/update) an FTS3Operation object into the db
:param opObj: instance of FTS3Operation
"""
# In case someone manually set sourceSEs as a list:
if isinstance(opObj.sourceSEs, list):
opObj.sourceSEs = ','.join(opObj.sourceSEs)
opJSON = opObj.toJSON()
return self._getRPC(**kwargs).persistOperation(opJSON)
def getOperation(self, operationID, **kwargs):
""" Get the FTS3Operation from the database
:param operationID: id of the operation
:return: FTS3Operation object
"""
res = self._getRPC(**kwargs).getOperation(operationID)
if not res['OK']:
return res
opJSON = res['Value']
try:
opObj = json.loads(opJSON, cls=FTS3JSONDecoder)
return S_OK(opObj)
except Exception as e:
return S_ERROR("Exception when decoding the FTS3Operation object %s" % e)
def getActiveJobs(self, limit=20, lastMonitor=None, jobAssignmentTag='Assigned', ** kwargs):
""" Get all the FTSJobs that are not in a final state
:param limit: max number of jobs to retrieve
:return: list of FTS3Jobs
"""
res = self._getRPC(**kwargs).getActiveJobs(limit, lastMonitor, jobAssignmentTag)
if not res['OK']:
return res
activeJobsJSON = res['Value']
try:
activeJobs = json.loads(activeJobsJSON, cls=FTS3JSONDecoder)
return S_OK(activeJobs)
except Exception as e:
return S_ERROR("Exception when decoding the active jobs json %s" % e)
def updateFileStatus(self, fileStatusDict, ftsGUID=None, **kwargs):
""" Update the file ftsStatus and error
:param fileStatusDict : { fileID : { status , error, ftsGUID } }
    :param ftsGUID: if specified, only update the files having a matching ftsGUID
"""
return self._getRPC(**kwargs).updateFileStatus(fileStatusDict, ftsGUID)
def updateJobStatus(self, jobStatusDict, **kwargs):
""" Update the job Status and error
:param jobStatusDict : { jobID : { status , error } }
"""
return self._getRPC(**kwargs).updateJobStatus(jobStatusDict)
def getNonFinishedOperations(self, limit=20, operationAssignmentTag="Assigned", **kwargs):
""" Get all the FTS3Operations that have files in New or Failed state
(reminder: Failed is NOT terminal for files. Failed is when fts failed, but we
can retry)
:param limit: max number of jobs to retrieve
:return: json list of FTS3Operation
"""
res = self._getRPC(**kwargs).getNonFinishedOperations(limit, operationAssignmentTag)
if not res['OK']:
return res
operationsJSON = res['Value']
try:
operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
return S_OK(operations)
except Exception as e:
return S_ERROR(0, "Exception when decoding the non finished operations json %s" % e)
def getOperationsFromRMSOpID(self, rmsOpID, **kwargs):
""" Get the FTS3Operations matching a given RMS Operation
:param rmsOpID: id of the operation in the RMS
:return: list of FTS3Operation objects
"""
res = self._getRPC(**kwargs).getOperationsFromRMSOpID(rmsOpID)
if not res['OK']:
return res
operationsJSON = res['Value']
try:
operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
return S_OK(operations)
except Exception as e:
return S_ERROR(0, "Exception when decoding the operations json %s" % e)
| andresailer/DIRAC | DataManagementSystem/Client/FTS3Client.py | Python | gpl-3.0 | 3,920 | 0.008673 |
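A minimal usage sketch for FTS3Client, assuming a configured DIRAC installation; the operation id is made up and only methods defined above are used.

from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client

client = FTS3Client()
res = client.getOperation(1234)  # 1234 is a hypothetical operation id
if res['OK']:
    operation = res['Value']     # decoded FTS3Operation object
else:
    print("Failed to fetch operation:", res['Message'])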
"""Google config for Cloud."""
import asyncio
from http import HTTPStatus
import logging
from hass_nabucasa import Cloud, cloud_api
from hass_nabucasa.google_report_state import ErrorResponse
from homeassistant.components.google_assistant.const import DOMAIN as GOOGLE_DOMAIN
from homeassistant.components.google_assistant.helpers import AbstractConfig
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES,
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
from homeassistant.core import CoreState, split_entity_id
from homeassistant.helpers import entity_registry as er, start
from homeassistant.setup import async_setup_component
from .const import (
CONF_ENTITY_CONFIG,
DEFAULT_DISABLE_2FA,
PREF_DISABLE_2FA,
PREF_SHOULD_EXPOSE,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
class CloudGoogleConfig(AbstractConfig):
"""HA Cloud Configuration for Google Assistant."""
def __init__(
self, hass, config, cloud_user: str, prefs: CloudPreferences, cloud: Cloud
):
"""Initialize the Google config."""
super().__init__(hass)
self._config = config
self._user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._cur_entity_prefs = self._prefs.google_entity_configs
self._cur_default_expose = self._prefs.google_default_expose
self._sync_entities_lock = asyncio.Lock()
self._sync_on_started = False
@property
def enabled(self):
"""Return if Google is enabled."""
return (
self._cloud.is_logged_in
and not self._cloud.subscription_expired
and self._prefs.google_enabled
)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._prefs.google_secure_devices_pin
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self.enabled and self._prefs.google_report_state
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook.
Return None to disable the local SDK.
"""
return self._prefs.google_local_webhook_id
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
return self._user
@property
def cloud_user(self):
"""Return Cloud User account."""
return self._user
async def async_initialize(self):
"""Perform async initialization of config."""
await super().async_initialize()
async def hass_started(hass):
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
start.async_at_start(self.hass, hass_started)
# Remove old/wrong user agent ids
remove_agent_user_ids = []
for agent_user_id in self._store.agent_user_ids:
if agent_user_id != self.agent_user_id:
remove_agent_user_ids.append(agent_user_id)
for agent_user_id in remove_agent_user_ids:
await self.async_disconnect_agent_user(agent_user_id)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
er.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def should_expose(self, state):
"""If a state object should be exposed."""
return self._should_expose_entity_id(state.entity_id)
def _should_expose_entity_id(self, entity_id):
"""If an entity ID should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config["filter"].empty_filter:
return self._config["filter"](entity_id)
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
entity_registry = er.async_get(self.hass)
if registry_entry := entity_registry.async_get(entity_id):
auxiliary_entity = registry_entry.entity_category in (
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
else:
auxiliary_entity = False
default_expose = self._prefs.google_default_expose
# Backwards compat
if default_expose is None:
return not auxiliary_entity
return not auxiliary_entity and split_entity_id(entity_id)[0] in default_expose
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return self._cloud.username
@property
def has_registered_user_agent(self):
"""Return if we have a Agent User Id registered."""
return len(self._store.agent_user_ids) > 0
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return self.agent_user_id
def should_2fa(self, state):
"""If an entity should be checked for 2FA."""
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(state.entity_id, {})
return not entity_config.get(PREF_DISABLE_2FA, DEFAULT_DISABLE_2FA)
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
try:
await self._cloud.google_report_state.async_send_message(message)
except ErrorResponse as err:
_LOGGER.warning("Error reporting state - %s: %s", err.code, err.message)
async def _async_request_sync_devices(self, agent_user_id: str):
"""Trigger a sync with Google."""
if self._sync_entities_lock.locked():
return HTTPStatus.OK
async with self._sync_entities_lock:
resp = await cloud_api.async_google_actions_request_sync(self._cloud)
return resp.status
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if not self._cloud.is_logged_in:
if self.is_reporting_state:
self.async_disable_report_state()
if self.is_local_sdk_active:
self.async_disable_local_sdk()
return
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
if self.should_report_state != self.is_reporting_state:
if self.should_report_state:
self.async_enable_report_state()
else:
self.async_disable_report_state()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities_all()
# If entity prefs are the same or we have filter in config.yaml,
# don't sync.
elif (
self._cur_entity_prefs is not prefs.google_entity_configs
or self._cur_default_expose is not prefs.google_default_expose
) and self._config["filter"].empty_filter:
self.async_schedule_google_sync_all()
if self.enabled and not self.is_local_sdk_active:
self.async_enable_local_sdk()
elif not self.enabled and self.is_local_sdk_active:
self.async_disable_local_sdk()
self._cur_entity_prefs = prefs.google_entity_configs
self._cur_default_expose = prefs.google_default_expose
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
# Only consider entity registry updates if info relevant for Google has changed
if event.data["action"] == "update" and not bool(
set(event.data["changes"]) & er.ENTITY_DESCRIBING_ATTRIBUTES
):
return
entity_id = event.data["entity_id"]
if not self._should_expose_entity_id(entity_id):
return
if self.hass.state != CoreState.running:
return
self.async_schedule_google_sync_all()
| aronsky/home-assistant | homeassistant/components/cloud/google_config.py | Python | apache-2.0 | 8,528 | 0.000938 |
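A self-contained sketch of the exposure decision implemented by `_should_expose_entity_id` above: an explicit per-entity preference wins, otherwise config/diagnostic (auxiliary) entities are hidden and the entity's domain must be in the default-expose list. Function and argument names here are illustrative, not part of Home Assistant.

def should_expose(entity_id, entity_prefs, auxiliary, default_expose):
    explicit = entity_prefs.get(entity_id, {}).get("should_expose")
    if explicit is not None:
        return explicit            # per-entity preference always wins
    if default_expose is None:
        return not auxiliary       # backwards compat: expose everything non-auxiliary
    domain = entity_id.split(".", 1)[0]
    return not auxiliary and domain in default_expose

print(should_expose("light.kitchen", {}, False, ["light"]))     # True
print(should_expose("sensor.wifi_rssi", {}, True, ["sensor"]))  # False: diagnostic entity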
import pathlib
import urllib.error
import urllib.request
import logging
import bs4
import parmap
import pandas as pd
from betel import utils
from betel import info_files_helpers
from betel import betel_errors
class PlayAppPageScraper:
"""A class for scraping the icons and categories from Google Play Store
apps' web pages."""
_ICON_CLASS = "T75of sHb2Xb" # icon's tag's class
_APP_CATEGORY_ITEMPROP = "genre" # app's category's tag's itemprop
def __init__(self, base_url: str, storage_dir: pathlib.Path, category_filter: [str] = None):
"""Constructor.
:param base_url: base url of the apps store.
:param storage_dir: main storage directory for retrieved info.
:param category_filter: a list of categories whose apps are stored
(instead of the whole input)
"""
self._base_url = base_url
self._storage_dir = storage_dir
self._storage_dir.mkdir(exist_ok=True, parents=True)
self._info_file = storage_dir / utils.SCRAPER_INFO_FILE_NAME
self._log_file = storage_dir / utils.SCRAPER_LOG_FILE_NAME
logging.basicConfig(filename=self._log_file, filemode="a+")
self._category_filter = category_filter
def _build_app_page_url(self, app_id: str) -> str:
return self._base_url + "/details?id=" + app_id
def _get_app_page(self, app_id: str) -> bs4.BeautifulSoup:
url = self._build_app_page_url(app_id)
return _get_html(url)
def get_app_icon(self, app_id: str, subdir: pathlib.Path = "") -> None:
"""Scrapes the app icon URL from the app's Play Store details page,
downloads the corresponding app icon and saves it to
_storage_dir / subdir / icon_{app_id}.
:param app_id: the id of the app.
:param subdir: icon storage subdirectory inside _storage_dir base
directory.
"""
html = self._get_app_page(app_id)
src = self._scrape_icon_url(html)
self._download_icon(app_id, src, subdir)
def _scrape_icon_url(self, html: bs4.BeautifulSoup) -> str:
icon = html.find(class_=self._ICON_CLASS)
if icon is None:
raise betel_errors.PlayScrapingError("Icon class not found in html.")
return icon["src"]
def _download_icon(self, app_id: str, source: str, directory: pathlib.Path) -> None:
location = self._storage_dir / directory
location.mkdir(exist_ok=True, parents=True)
try:
urllib.request.urlretrieve(source, location / utils.get_app_icon_name(app_id))
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not retrieve icon.", exception)
def get_app_category(self, app_id: str) -> str:
"""Scrapes the app category from the app's Play Store details page.
:param app_id: the id of the app.
:return: the category of the app in str format
"""
html = self._get_app_page(app_id)
return self._scrape_category(html).lower()
def _scrape_category(self, html: bs4.BeautifulSoup) -> str:
category = html.find(itemprop=self._APP_CATEGORY_ITEMPROP)
if category is None:
raise betel_errors.PlayScrapingError("Category itemprop not found in html.")
return category.get_text()
def store_app_info(self, app_id: str) -> None:
"""Adds an app to the data set by retrieving all the info
needed and appending it to the list of apps (kept in _info_file).
The app is only stored in the case that its category is in the
_category_filter list.
:param app_id: the id of the app.
"""
search_data_frame = utils.get_app_search_data_frame(app_id)
part_of_data_set = (
info_files_helpers.part_of_data_set(self._info_file, search_data_frame)
)
try:
if not part_of_data_set:
category = self.get_app_category(app_id)
if self._category_filter is None or category in self._category_filter:
self.get_app_icon(app_id)
self._write_app_info(app_id, category)
except betel_errors.BetelError as exception:
info = f"{app_id}, {getattr(exception, 'message', repr(exception))}"
logging.warning(info)
def _write_app_info(self, app_id: str, category: str) -> None:
app_info = _build_app_info_data_frame(app_id, category)
info_files_helpers.add_to_data(self._info_file, app_info)
def store_apps_info(self, app_ids: [str]) -> None:
"""Adds the specified apps to the data set by retrieving all the info
needed and appending them to the list of apps (kept in _info_file).
:param app_ids: array of app ids.
"""
app_ids = set(app_ids)
parmap.map(self.store_app_info, app_ids)
def _get_html(url: str) -> bs4.BeautifulSoup:
try:
page = urllib.request.urlopen(url)
soup = bs4.BeautifulSoup(page, 'html.parser')
return soup
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not open URL.", exception)
def _build_app_info_data_frame(app_id: str, category: str) -> pd.DataFrame:
dictionary = {"app_id": app_id, "category": category}
return pd.DataFrame([dictionary])
| googleinterns/betel | betel/app_page_scraper.py | Python | apache-2.0 | 5,385 | 0.001486 |
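A minimal usage sketch for PlayAppPageScraper; the base URL, output directory, category filter and package id are illustrative values, and network access to the Play Store is required for it to do anything.

import pathlib
from betel.app_page_scraper import PlayAppPageScraper

scraper = PlayAppPageScraper(
    base_url="https://play.google.com/store/apps",
    storage_dir=pathlib.Path("./play_data"),
    category_filter=["tools"],  # only keep apps whose scraped category is "tools"
)
scraper.store_apps_info(["org.mozilla.firefox"])  # hypothetical app id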
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('protocoltool', '0023_auto_20161208_1723'),
]
operations = [
migrations.RenameField(
model_name='basicdataset',
old_name='checked',
new_name='hidden',
),
]
| switchonproject/sip-html5-protocol-tool | protocoltool/migrations/0024_auto_20161212_1645.py | Python | lgpl-3.0 | 399 | 0 |
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from .eqpt_equipment import EQPT_TYPES
class Paddler(models.Model):
_name = 'eqpt.paddler'
_description = "Paddler Cycle Equipment"
_description = "Cycle paddler equipment"
eqpt_type = fields.Selection(selection=EQPT_TYPES, string="")
eqpt_id = fields.Reference(selection='_get_eqpt_models', string="Equipment")
cycle_id = fields.Many2one(comodel_name='pac.cycle', string="Cycle")
member_id = fields.Many2one(comodel_name='adm.asso.member', string="Member")
| RemiFr82/ck_addons | ck_equipment/models/eqpt_paddler.py | Python | gpl-3.0 | 551 | 0.005445 |
#!/homes/janeway/zhuww/bin/py
import numpy
import pyfits
from pylab import *
#from Pgplot import *
#from ppgplot import *
#import ppgplot
from numpy import *
class Cursor:
badtimestart=[]
badtimeend=[]
lines = []
def __init__(self, ax):
self.ax = ax
#self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
# text location in axes coords
#self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
self.Flag = True
def mouse_move(self, event):
if not event.inaxes: return
x, y = event.xdata, event.ydata
# update the line positions
#self.lx.set_ydata(y )
self.ly.set_xdata(x )
#self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
draw()
def click(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
event.button, event.x, event.y, event.xdata, event.ydata)
self.xdata = event.xdata
self.ydata = event.ydata
if '%d' % (event.button) == '1':
if self.Flag:
self.badtimestart.append(self.xdata)
self.Flag = False
self.lines.append(self.ax.axvline(color='r'))
self.lines[-1].set_xdata(event.xdata)
else:
self.badtimeend.append(self.xdata)
self.lines.append(self.ax.axvline(color='k'))
self.lines[-1].set_xdata(event.xdata)
self.Flag = True
elif '%d' % (event.button) == '3':
if self.Flag:
#self.ax.axvline(color='w').set_xdata(self.badtimeend[-1])
self.lines[-1].remove()
self.lines = self.lines[:-1]
self.badtimeend = self.badtimeend[:-1]
self.Flag = False
#self.ax.lines.pop(0)
else:
#self.ax.axvline(color='w').set_xdata(self.badtimestart[-1])
self.lines[-1].remove()
self.lines = self.lines[:-1]
self.badtimestart = self.badtimestart[:-1]
self.Flag = True
#self.ax.lines.pop(0)
else:
print 'event.button: %d' % (event.button)
draw()
def endedit(self, event):
print 'Quit editing bad time intervals.'
#disconnect('button_press_event', cursor.click)
class SnaptoCursor:
"""
Like Cursor but the crosshair snaps to the nearest x,y point
For simplicity, I'm assuming x is sorted
"""
def __init__(self, ax, x, y):
self.ax = ax
self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
self.x = x
self.y = y
# text location in axes coords
#self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes: return
x, y = event.xdata, event.ydata
indx = searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
# update the line positions
self.lx.set_ydata(y )
self.ly.set_xdata(x )
#self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
#print 'x=%1.2f, y=%1.2f'%(x,y)
draw()
hdulist=pyfits.open('histo.fits')
tabdata=hdulist[1].data
cols=hdulist[1].columns
#names=cols.names
#print names
counts=array(tabdata.field('COUNTS'))
time=array(tabdata.field('TIME'))
#starttime=time[0]
#time=time#-starttime
#plotxy(counts,time)
ax = subplot(111)
ax.plot(time, counts)
cursor = Cursor(ax)
#cursor = SnaptoCursor(ax, t, s)
connect('motion_notify_event', cursor.mouse_move)
connect('button_press_event', cursor.click)
duration = max(time) - min(time)
ax.set_xlim((min(time)-0.1*duration, max(time)+0.1*duration))
show()
#while not ppgplot.pgband(0)[2]=="X":
#print "click on the daigram twice to define a bad time interval:"
#badtimestart.append(ppgplot.pgband(6)[0])
#badtimeend.append(ppgplot.pgband(6)[0])
#closeplot()
badtimestart=numpy.array(cursor.badtimestart)#+starttime
badtimeend=numpy.array(cursor.badtimeend)#+starttime
print badtimestart
print badtimeend
#plot(time,counts)
#print tabdata[0]
#check gti
hdulist=pyfits.open('gti.fits')
tabdata=hdulist[1].data
#cols=hdulist[1].columns
start=tabdata.field('START')
stop=tabdata.field('STOP')
print len(start),len(stop)
for j in range(len(badtimestart)):
badlist=[]
if badtimestart[j] >= badtimeend[j]:
print "invalid bad time interval: abandon."
else:
print len(start),len(stop)
for i in range(len(start)):
if start[i] < badtimestart[j]:
if stop[i] <= badtimestart[j]:
continue
else:
if stop[i] <= badtimeend[j]:
stop[i]=badtimestart[j]
else:
start=insert(start,i+1,badtimeend[j])
stop=insert(stop,i+1,stop[i])
stop[i]=badtimestart[j]
break
else:
if start[i] < badtimeend[j]:
if stop[i] <= badtimeend[j]:
badlist.append(i)
else:
start[i]=badtimeend[j]
else:
break
start=delete(start,badlist)
stop=delete(stop,badlist)
errbar=0.5*(stop-start)
center=array(start+errbar)#-starttime
#marker_level = 0.*start+10.
marker_level = array(0.*start+max(counts)/2)
#plotxy(array,center,symbol=1,line=None,errx=errbar,setup=0)
#closeplot()
print sum(stop-start)
col1=pyfits.Column(name="START",format = 'D',unit = 's',array=start)
col2=pyfits.Column(name="STOP",format = 'D',unit = 's',array=stop)
cols=pyfits.ColDefs([col1,col2])
tbhdu=pyfits.new_table(cols)
hdulist.append(tbhdu)
hdulist[2].header=hdulist[1].header
#print hdulist[2].header['ONTIME'],hdulist[2].header['TSTART'],hdulist[2].header['TSTOP']
hdulist[2].header['ONTIME']=sum(stop-start)
hdulist[2].header['TSTART']=start[0]
hdulist[2].header['TSTOP']=stop[len(stop)-1]
#print hdulist[2].header['ONTIME'],hdulist[2].header['TSTART'],hdulist[2].header['TSTOP']
hdulist.remove(hdulist[1])
hdulist.writeto('newgti.fits')
hdulist.close()
#plotxy(counts,time,device="gti.ps/PS")
#plotxy(array,center,symbol=1,line=None,errx=errbar,setup=0)
#closeplot()
plot(time, counts)
errorbar(center, level, xerr=errbar)
show()
|
zhuww/planetclient
|
gti.py
|
Python
|
apache-2.0
| 6,453 | 0.017666 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Participant'
db.delete_table(u'pa_participant')
# Removing M2M table for field user on 'Participant'
db.delete_table('pa_participant_user')
# Adding M2M table for field user on 'ReportingPeriod'
db.create_table(u'pa_reportingperiod_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])
def backwards(self, orm):
# Adding model 'Participant'
db.create_table(u'pa_participant', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
))
db.send_create_signal(u'pa', ['Participant'])
# Adding M2M table for field user on 'Participant'
db.create_table(u'pa_participant_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])
# Removing M2M table for field user on 'ReportingPeriod'
db.delete_table('pa_reportingperiod_user')
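    # Hypothetical usage note (not part of the original migration): with South
    # installed, this migration would typically be applied with
    #     ./manage.py migrate pa 0002_auto__del_participant
    # and rolled back (running backwards()) with
    #     ./manage.py migrate pa 0001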
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pa.activity': {
'Meta': {'object_name': 'Activity'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pa.activityentry': {
'Meta': {'object_name': 'ActivityEntry'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
},
u'pa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
},
u'pa.profession': {
'Meta': {'object_name': 'Profession'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'pa.reportingperiod': {
'Meta': {'object_name': 'ReportingPeriod'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
},
u'pa.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pa']
|
Mathew/psychoanalysis
|
psychoanalysis/apps/pa/migrations/0002_auto__del_participant.py
|
Python
|
mit
| 7,476 | 0.007089 |
import re
thisDict = {
"path": "thisDict",
"2": "2",
"3": "3",
"5": "5",
"7": "7",
"2 x 2": "{2} x {2}",
"3 x 2": "{3} x {2}",
"4": "{2 x 2}",
"6": "{3 x 2}",
"8": "{2 x 2} x {2}",
"16": "{2 x 2} x {2 x 2}",
"96": "{16} x {6}",
"thisModel.Root": "thisModel.Root: {96} bottles of {Favored Beverage} on the {House Element}{period}",
"Model.Root": "{thisModel.Root}",
"period": "." ,
"Favored Beverage": "b{i}r",
"House Element": "{wl}",
"i": "ee",
"wl": "wall"
}
thatDict = {
"path": "thatDict",
"13": "13",
"13 x 13": "{13} x {13}",
"169": "qqq {16}",
"thatModel.Root": "thatModel.Root: {169} books on the {House Element}-mounted {Furniture}.",
"Model.Root": "{thatModel.Root}",
"Furniture": "b{u}kshelves",
"u": "oo"
}
anotherDict = {
"path": "anotherDict",
"11": "11",
"11 x 11": "{11} x {11}",
"121": "{11 x 11}",
"anotherModel.Root": "anotherModel.Root: {121} bottles of b{i}r and {Favored Beverage} on the {Furniture}.",
"Favored Beverage": "whiskey",
"megaModel.Root" : "{thisModel.Root} ... {thatModel.Root} ... {anotherModel.Root}",
"Model.Root": "{anotherModel.Root}"
}
directory = [thisDict, thatDict, anotherDict]
# When the same key exists in more than one dictionary, the current dictionary takes priority for now -> decide a better policy later?
# Limitation: dictionaries are only searched in the order given in directory; the first key found is always the one used
# A "path" field had to be added to every dictionary to keep track of its name -> there was no way to print the dictionary's name without using globals(), which is bad practice
def find_value(dictionary, key):
#Case when the key does not exist in this dictionary
if key not in dictionary:
for x in range(len(directory)):
if directory[x] != dictionary and key in directory[x]:
return find_value(directory[x], key)
#When the key does not exist in any of dictionaries
raise KeyError ("Key not found in directory")
#Case when the key does exist in this dictionary
else:
value = dictionary[key]
search = [x.group() for x in re.finditer(r'{(.*?)}', value)]
path = "{" + key + "}" + " from: " + dictionary["path"] + "\n"
# End case
if len(search) == 0:
return value, path
else:
for i in range(len(search)):
new_val, new_path = find_value(dictionary, search[i][1:-1])
value = value.replace(search[i], new_val, 1)
path += new_path
return value, path
value, path = find_value(thisDict, "megaModel.Root")
print("Value: ", value)
print("Path:\n", path)
#Called inside find_value when there is no such key in the given dictionary input
#Find the key from other dictionaries, keep log of which path; return
# def fetch_value(dictionary, key):
|
CommonAccord/Cmacc-Org
|
Doc/G/NW-NDA/99/WiP/Schedule/String.py
|
Python
|
mit
| 2,950 | 0.008136 |
from packetbeat import BaseTest
"""
Tests for HTTP messages with gaps (packet loss) in them.
"""
class Test(BaseTest):
def test_gap_in_large_file(self):
"""
Should recover well from losing a packet in a large
file download.
"""
self.render_config_template(
http_ports=[8000],
)
self.run_packetbeat(pcap="gap_in_stream.pcap")
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["status"] == "OK"
print(o["notes"])
assert len(o["notes"]) == 1
assert o["notes"][0] == "Packet loss while capturing the response"
|
christiangalsterer/httpbeat
|
vendor/github.com/elastic/beats/packetbeat/tests/system/test_0029_http_gap.py
|
Python
|
apache-2.0
| 659 | 0 |
# -*- coding: utf-8 -*-
import sys
def __if_number_get_string(number):
converted_str = number
if isinstance(number, (int, float)):
converted_str = str(number)
return converted_str
def get_string(strOrUnicode, encoding='utf-8'):
strOrUnicode = __if_number_get_string(strOrUnicode)
return strOrUnicode
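# Minimal usage sketch (an assumption, not part of the original module):
# numbers are stringified, anything else is returned unchanged.
if __name__ == '__main__':
    assert get_string(42) == '42'
    assert get_string(3.5) == '3.5'
    assert get_string('already a string') == 'already a string'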
|
davidvilla/python-doublex
|
doublex/safeunicode.py
|
Python
|
gpl-3.0
| 333 | 0 |
# Copyright (c) 2010 by California Institute of Technology
# Copyright (c) 2012 by Delft University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of the California Institute of Technology nor
# the Delft University of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: M.M. (Rene) van Paassen (using xferfcn.py as basis)
# Date: 02 Oct 12
from __future__ import division
"""
Frequency response data representation and functions.
This module contains the FRD class and also functions that operate on
FRD data.
"""
# External function declarations
from warnings import warn
import numpy as np
from numpy import angle, array, empty, ones, \
real, imag, absolute, eye, linalg, where, dot, sort
from scipy.interpolate import splprep, splev
from .lti import LTI, _process_frequency_response
from . import config
__all__ = ['FrequencyResponseData', 'FRD', 'frd']
class FrequencyResponseData(LTI):
"""FrequencyResponseData(d, w[, smooth])
A class for models defined by frequency response data (FRD).
The FrequencyResponseData (FRD) class is used to represent systems in
frequency response data form.
Parameters
----------
d : 1D or 3D complex array_like
The frequency response at each frequency point. If 1D, the system is
assumed to be SISO. If 3D, the system is MIMO, with the first
dimension corresponding to the output index of the FRD, the second
dimension corresponding to the input index, and the 3rd dimension
corresponding to the frequency points in omega
w : iterable of real frequencies
List of frequency points for which data are available.
smooth : bool, optional
If ``True``, create an interpolation function that allows the
frequency response to be computed at any frequency within the range of
        frequencies given in ``w``. If ``False`` (default), frequency response
can only be obtained at the frequencies specified in ``w``.
Attributes
----------
ninputs, noutputs : int
Number of input and output variables.
omega : 1D array
Frequency points of the response.
fresp : 3D array
Frequency response, indexed by output index, input index, and
frequency point.
Notes
-----
The main data members are 'omega' and 'fresp', where 'omega' is a 1D array
    of frequency points and 'fresp' is a 3D array of frequency responses,
with the first dimension corresponding to the output index of the FRD, the
second dimension corresponding to the input index, and the 3rd dimension
corresponding to the frequency points in omega. For example,
>>> frdata[2,5,:] = numpy.array([1., 0.8-0.2j, 0.2-0.8j])
means that the frequency response from the 6th input to the 3rd output at
the frequencies defined in omega is set to the array above, i.e. the rows
represent the outputs and the columns represent the inputs.
A frequency response data object is callable and returns the value of the
transfer function evaluated at a point in the complex plane (must be on
    the imaginary axis). See :meth:`~control.FrequencyResponseData.__call__`
for a more detailed description.
"""
# Allow NDarray * StateSpace to give StateSpace._rmul_() priority
# https://docs.scipy.org/doc/numpy/reference/arrays.classes.html
__array_priority__ = 11 # override ndarray and matrix types
#
# Class attributes
#
# These attributes are defined as class attributes so that they are
# documented properly. They are "overwritten" in __init__.
#
#: Number of system inputs.
#:
#: :meta hide-value:
ninputs = 1
#: Number of system outputs.
#:
#: :meta hide-value:
noutputs = 1
_epsw = 1e-8 #: Bound for exact frequency match
def __init__(self, *args, **kwargs):
"""Construct an FRD object.
The default constructor is FRD(d, w), where w is an iterable of
frequency points, and d is the matching frequency data.
If d is a single list, 1d array, or tuple, a SISO system description
is assumed. d can also be
To call the copy constructor, call FRD(sys), where sys is a
FRD object.
To construct frequency response data for an existing LTI
object, other than an FRD, call FRD(sys, omega)
"""
# TODO: discrete-time FRD systems?
smooth = kwargs.get('smooth', False)
if len(args) == 2:
if not isinstance(args[0], FRD) and isinstance(args[0], LTI):
# not an FRD, but still a system, second argument should be
# the frequency range
otherlti = args[0]
self.omega = sort(np.asarray(args[1], dtype=float))
# calculate frequency response at my points
if otherlti.isctime():
s = 1j * self.omega
self.fresp = otherlti(s, squeeze=False)
else:
z = np.exp(1j * self.omega * otherlti.dt)
self.fresp = otherlti(z, squeeze=False)
else:
# The user provided a response and a freq vector
self.fresp = array(args[0], dtype=complex)
if len(self.fresp.shape) == 1:
self.fresp = self.fresp.reshape(1, 1, len(args[0]))
self.omega = array(args[1], dtype=float)
if len(self.fresp.shape) != 3 or \
self.fresp.shape[-1] != self.omega.shape[-1] or \
len(self.omega.shape) != 1:
raise TypeError(
"The frequency data constructor needs a 1-d or 3-d"
" response data array and a matching frequency vector"
" size")
elif len(args) == 1:
# Use the copy constructor.
if not isinstance(args[0], FRD):
raise TypeError(
"The one-argument constructor can only take in"
" an FRD object. Received %s." % type(args[0]))
self.omega = args[0].omega
self.fresp = args[0].fresp
else:
raise ValueError(
"Needs 1 or 2 arguments; received %i." % len(args))
# create interpolation functions
if smooth:
self.ifunc = empty((self.fresp.shape[0], self.fresp.shape[1]),
dtype=tuple)
for i in range(self.fresp.shape[0]):
for j in range(self.fresp.shape[1]):
self.ifunc[i, j], u = splprep(
u=self.omega, x=[real(self.fresp[i, j, :]),
imag(self.fresp[i, j, :])],
w=1.0/(absolute(self.fresp[i, j, :]) + 0.001), s=0.0)
else:
self.ifunc = None
LTI.__init__(self, self.fresp.shape[1], self.fresp.shape[0])
def __str__(self):
"""String representation of the transfer function."""
mimo = self.ninputs > 1 or self.noutputs > 1
outstr = ['Frequency response data']
for i in range(self.ninputs):
for j in range(self.noutputs):
if mimo:
outstr.append("Input %i to output %i:" % (i + 1, j + 1))
outstr.append('Freq [rad/s] Response')
outstr.append('------------ ---------------------')
outstr.extend(
['%12.3f %10.4g%+10.4gj' % (w, re, im)
for w, re, im in zip(self.omega,
real(self.fresp[j, i, :]),
imag(self.fresp[j, i, :]))])
return '\n'.join(outstr)
def __repr__(self):
"""Loadable string representation,
limited for number of data points.
"""
return "FrequencyResponseData({d}, {w}{smooth})".format(
d=repr(self.fresp), w=repr(self.omega),
smooth=(self.ifunc and ", smooth=True") or "")
def __neg__(self):
"""Negate a transfer function."""
return FRD(-self.fresp, self.omega)
def __add__(self, other):
"""Add two LTI objects (parallel connection)."""
if isinstance(other, FRD):
# verify that the frequencies match
if len(other.omega) != len(self.omega) or \
(other.omega != self.omega).any():
warn("Frequency points do not match; expect "
"truncation and interpolation.")
# Convert the second argument to a frequency response function.
# or re-base the frd to the current omega (if needed)
other = _convert_to_FRD(other, omega=self.omega)
# Check that the input-output sizes are consistent.
if self.ninputs != other.ninputs:
raise ValueError("The first summand has %i input(s), but the \
second has %i." % (self.ninputs, other.ninputs))
if self.noutputs != other.noutputs:
raise ValueError("The first summand has %i output(s), but the \
second has %i." % (self.noutputs, other.noutputs))
return FRD(self.fresp + other.fresp, other.omega)
def __radd__(self, other):
"""Right add two LTI objects (parallel connection)."""
return self + other
def __sub__(self, other):
"""Subtract two LTI objects."""
return self + (-other)
def __rsub__(self, other):
"""Right subtract two LTI objects."""
return other + (-self)
def __mul__(self, other):
"""Multiply two LTI objects (serial connection)."""
# Convert the second argument to a transfer function.
if isinstance(other, (int, float, complex, np.number)):
return FRD(self.fresp * other, self.omega,
smooth=(self.ifunc is not None))
else:
other = _convert_to_FRD(other, omega=self.omega)
# Check that the input-output sizes are consistent.
if self.ninputs != other.noutputs:
raise ValueError(
"H = G1*G2: input-output size mismatch: "
"G1 has %i input(s), G2 has %i output(s)." %
(self.ninputs, other.noutputs))
inputs = other.ninputs
outputs = self.noutputs
fresp = empty((outputs, inputs, len(self.omega)),
dtype=self.fresp.dtype)
for i in range(len(self.omega)):
fresp[:, :, i] = dot(self.fresp[:, :, i], other.fresp[:, :, i])
return FRD(fresp, self.omega,
smooth=(self.ifunc is not None) and
(other.ifunc is not None))
def __rmul__(self, other):
"""Right Multiply two LTI objects (serial connection)."""
# Convert the second argument to an frd function.
if isinstance(other, (int, float, complex, np.number)):
return FRD(self.fresp * other, self.omega,
smooth=(self.ifunc is not None))
else:
other = _convert_to_FRD(other, omega=self.omega)
# Check that the input-output sizes are consistent.
if self.noutputs != other.ninputs:
raise ValueError(
"H = G1*G2: input-output size mismatch: "
"G1 has %i input(s), G2 has %i output(s)." %
(other.ninputs, self.noutputs))
inputs = self.ninputs
outputs = other.noutputs
fresp = empty((outputs, inputs, len(self.omega)),
dtype=self.fresp.dtype)
for i in range(len(self.omega)):
fresp[:, :, i] = dot(other.fresp[:, :, i], self.fresp[:, :, i])
return FRD(fresp, self.omega,
smooth=(self.ifunc is not None) and
(other.ifunc is not None))
# TODO: Division of MIMO transfer function objects is not written yet.
def __truediv__(self, other):
"""Divide two LTI objects."""
if isinstance(other, (int, float, complex, np.number)):
return FRD(self.fresp * (1/other), self.omega,
smooth=(self.ifunc is not None))
else:
other = _convert_to_FRD(other, omega=self.omega)
if (self.ninputs > 1 or self.noutputs > 1 or
other.ninputs > 1 or other.noutputs > 1):
raise NotImplementedError(
"FRD.__truediv__ is currently only implemented for SISO "
"systems.")
return FRD(self.fresp/other.fresp, self.omega,
smooth=(self.ifunc is not None) and
(other.ifunc is not None))
# TODO: Remove when transition to python3 complete
def __div__(self, other):
return self.__truediv__(other)
# TODO: Division of MIMO transfer function objects is not written yet.
def __rtruediv__(self, other):
"""Right divide two LTI objects."""
if isinstance(other, (int, float, complex, np.number)):
return FRD(other / self.fresp, self.omega,
smooth=(self.ifunc is not None))
else:
other = _convert_to_FRD(other, omega=self.omega)
if (self.ninputs > 1 or self.noutputs > 1 or
other.ninputs > 1 or other.noutputs > 1):
raise NotImplementedError(
"FRD.__rtruediv__ is currently only implemented for "
"SISO systems.")
return other / self
# TODO: Remove when transition to python3 complete
def __rdiv__(self, other):
return self.__rtruediv__(other)
def __pow__(self, other):
if not type(other) == int:
raise ValueError("Exponent must be an integer")
if other == 0:
return FRD(ones(self.fresp.shape), self.omega,
smooth=(self.ifunc is not None)) # unity
if other > 0:
return self * (self**(other-1))
if other < 0:
return (FRD(ones(self.fresp.shape), self.omega) / self) * \
(self**(other+1))
# Define the `eval` function to evaluate an FRD at a given (real)
# frequency. Note that we choose to use `eval` instead of `evalfr` to
# avoid confusion with :func:`evalfr`, which takes a complex number as its
# argument. Similarly, we don't use `__call__` to avoid confusion between
# G(s) for a transfer function and G(omega) for an FRD object.
# update Sawyer B. Fuller 2020.08.14: __call__ added to provide a uniform
# interface to systems in general and the lti.frequency_response method
def eval(self, omega, squeeze=None):
"""Evaluate a transfer function at angular frequency omega.
Note that a "normal" FRD only returns values for which there is an
entry in the omega vector. An interpolating FRD can return
intermediate values.
Parameters
----------
omega : float or 1D array_like
Frequencies in radians per second
squeeze : bool, optional
If squeeze=True, remove single-dimensional entries from the shape
of the output even if the system is not SISO. If squeeze=False,
keep all indices (output, input and, if omega is array_like,
frequency) even if the system is SISO. The default value can be
set using config.defaults['control.squeeze_frequency_response'].
Returns
-------
fresp : complex ndarray
The frequency response of the system. If the system is SISO and
squeeze is not True, the shape of the array matches the shape of
omega. If the system is not SISO or squeeze is False, the first
two dimensions of the array are indices for the output and input
and the remaining dimensions match omega. If ``squeeze`` is True
then single-dimensional axes are removed.
"""
omega_array = np.array(omega, ndmin=1) # array-like version of omega
# Make sure that we are operating on a simple list
if len(omega_array.shape) > 1:
raise ValueError("input list must be 1D")
# Make sure that frequencies are all real-valued
if any(omega_array.imag > 0):
raise ValueError("FRD.eval can only accept real-valued omega")
if self.ifunc is None:
elements = np.isin(self.omega, omega) # binary array
if sum(elements) < len(omega_array):
raise ValueError(
"not all frequencies omega are in frequency list of FRD "
"system. Try an interpolating FRD for additional points.")
else:
out = self.fresp[:, :, elements]
else:
out = empty((self.noutputs, self.ninputs, len(omega_array)),
dtype=complex)
for i in range(self.noutputs):
for j in range(self.ninputs):
for k, w in enumerate(omega_array):
frraw = splev(w, self.ifunc[i, j], der=0)
out[i, j, k] = frraw[0] + 1.0j * frraw[1]
return _process_frequency_response(self, omega, out, squeeze=squeeze)
def __call__(self, s, squeeze=None):
"""Evaluate system's transfer function at complex frequencies.
Returns the complex frequency response `sys(s)` of system `sys` with
`m = sys.ninputs` number of inputs and `p = sys.noutputs` number of
outputs.
To evaluate at a frequency omega in radians per second, enter
``s = omega * 1j`` or use ``sys.eval(omega)``
For a frequency response data object, the argument must be an
imaginary number (since only the frequency response is defined).
Parameters
----------
s : complex scalar or 1D array_like
Complex frequencies
squeeze : bool, optional (default=True)
If squeeze=True, remove single-dimensional entries from the shape
of the output even if the system is not SISO. If squeeze=False,
keep all indices (output, input and, if omega is array_like,
frequency) even if the system is SISO. The default value can be
set using config.defaults['control.squeeze_frequency_response'].
Returns
-------
fresp : complex ndarray
The frequency response of the system. If the system is SISO and
squeeze is not True, the shape of the array matches the shape of
omega. If the system is not SISO or squeeze is False, the first
two dimensions of the array are indices for the output and input
and the remaining dimensions match omega. If ``squeeze`` is True
then single-dimensional axes are removed.
Raises
------
ValueError
If `s` is not purely imaginary, because
            :class:`FrequencyResponseData` systems are only defined at imaginary
frequency values.
"""
# Make sure that we are operating on a simple list
if len(np.atleast_1d(s).shape) > 1:
raise ValueError("input list must be 1D")
if any(abs(np.atleast_1d(s).real) > 0):
raise ValueError("__call__: FRD systems can only accept "
"purely imaginary frequencies")
# need to preserve array or scalar status
if hasattr(s, '__len__'):
return self.eval(np.asarray(s).imag, squeeze=squeeze)
else:
return self.eval(complex(s).imag, squeeze=squeeze)
def freqresp(self, omega):
"""(deprecated) Evaluate transfer function at complex frequencies.
.. deprecated::0.9.0
Method has been given the more pythonic name
:meth:`FrequencyResponseData.frequency_response`. Or use
:func:`freqresp` in the MATLAB compatibility module.
"""
warn("FrequencyResponseData.freqresp(omega) will be removed in a "
"future release of python-control; use "
"FrequencyResponseData.frequency_response(omega), or "
"freqresp(sys, omega) in the MATLAB compatibility module "
"instead", DeprecationWarning)
return self.frequency_response(omega)
def feedback(self, other=1, sign=-1):
"""Feedback interconnection between two FRD objects."""
other = _convert_to_FRD(other, omega=self.omega)
if (self.noutputs != other.ninputs or self.ninputs != other.noutputs):
raise ValueError(
"FRD.feedback, inputs/outputs mismatch")
fresp = empty((self.noutputs, self.ninputs, len(other.omega)),
dtype=complex)
# TODO: vectorize this
# TODO: handle omega re-mapping
# TODO: is there a reason to use linalg.solve instead of linalg.inv?
# https://github.com/python-control/python-control/pull/314#discussion_r294075154
for k, w in enumerate(other.omega):
fresp[:, :, k] = np.dot(
self.fresp[:, :, k],
linalg.solve(
eye(self.ninputs)
+ np.dot(other.fresp[:, :, k], self.fresp[:, :, k]),
eye(self.ninputs))
)
return FRD(fresp, other.omega, smooth=(self.ifunc is not None))
#
# Allow FRD as an alias for the FrequencyResponseData class
#
# Note: This class was initially given the name "FRD", but this caused
# problems with documentation on MacOS platforms, since files were generated
# for control.frd and control.FRD, which are not differentiated on most MacOS
# filesystems, which are case insensitive. Renaming the FRD class to be
# FrequenceResponseData and then assigning FRD to point to the same object
# fixes this problem.
#
FRD = FrequencyResponseData
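# ------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library): build a SISO FRD from
# raw response data and evaluate it. The first-order response and the probe
# frequency below are illustrative assumptions, not values from the module.
def _frd_usage_sketch():
    omega = np.logspace(-2, 2, 50)            # frequency grid [rad/s]
    fresp = 1.0 / (1.0 + 1j * omega)          # assumed SISO frequency response
    sys_frd = FRD(fresp, omega, smooth=True)  # interpolating FRD
    # evaluate between grid points (needs smooth=True) ...
    at_one = sys_frd.eval(1.0)
    # ... or equivalently call the system at s = 1j * omega
    same = sys_frd(1j * 1.0)
    return at_one, same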
def _convert_to_FRD(sys, omega, inputs=1, outputs=1):
"""Convert a system to frequency response data form (if needed).
If sys is already an frd, and its frequency range matches or
overlaps the range given in omega then it is returned. If sys is
another LTI object or a transfer function, then it is converted to
a frequency response data at the specified omega. If sys is a
scalar, then the number of inputs and outputs can be specified
manually, as in:
>>> frd = _convert_to_FRD(3., omega) # Assumes inputs = outputs = 1
    >>> frd = _convert_to_FRD(1., omega, inputs=3, outputs=2)
In the latter example, sys's matrix transfer function is [[1., 1., 1.]
[1., 1., 1.]].
"""
if isinstance(sys, FRD):
omega.sort()
if len(omega) == len(sys.omega) and \
(abs(omega - sys.omega) < FRD._epsw).all():
# frequencies match, and system was already frd; simply use
return sys
raise NotImplementedError(
"Frequency ranges of FRD do not match, conversion not implemented")
elif isinstance(sys, LTI):
omega = np.sort(omega)
if sys.isctime():
fresp = sys(1j * omega)
else:
fresp = sys(np.exp(1j * omega * sys.dt))
if len(fresp.shape) == 1:
fresp = fresp[np.newaxis, np.newaxis, :]
return FRD(fresp, omega, smooth=True)
elif isinstance(sys, (int, float, complex, np.number)):
fresp = ones((outputs, inputs, len(omega)), dtype=float)*sys
return FRD(fresp, omega, smooth=True)
# try converting constant matrices
try:
sys = array(sys)
outputs, inputs = sys.shape
fresp = empty((outputs, inputs, len(omega)), dtype=float)
for i in range(outputs):
for j in range(inputs):
fresp[i, j, :] = sys[i, j]
return FRD(fresp, omega, smooth=True)
except Exception:
pass
raise TypeError('''Can't convert given type "%s" to FRD system.''' %
sys.__class__)
def frd(*args):
"""frd(d, w)
Construct a frequency response data model
frd models store the (measured) frequency response of a system.
This function can be called in different ways:
``frd(response, freqs)``
Create an frd model with the given response data, in the form of
complex response vector, at matching frequency freqs [in rad/s]
``frd(sys, freqs)``
Convert an LTI system into an frd model with data at frequencies
freqs.
Parameters
----------
    response: array_like or list
        complex vector with the system response
    freqs: array_like or list
        vector with frequencies
sys: LTI (StateSpace or TransferFunction)
A linear system
Returns
-------
sys: FRD
New frequency response system
See Also
--------
FRD, ss, tf
"""
return FRD(*args)
|
murrayrm/python-control
|
control/frdata.py
|
Python
|
bsd-3-clause
| 26,326 | 0.000076 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xmessage(AutotoolsPackage, XorgPackage):
"""xmessage displays a message or query in a window. The user can click
on an "okay" button to dismiss it or can select one of several buttons
to answer a question. xmessage can also exit after a specified time."""
homepage = "http://cgit.freedesktop.org/xorg/app/xmessage"
xorg_mirror_path = "app/xmessage-1.0.4.tar.gz"
version('1.0.4', sha256='883099c3952c8cace5bd11d3df2e9ca143fc07375997435d5ff4f2d50353acca')
depends_on('libxaw')
depends_on('libxt')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
iulian787/spack
|
var/spack/repos/builtin/packages/xmessage/package.py
|
Python
|
lgpl-2.1
| 845 | 0.001183 |
import system
# Create the computer system and power it up.
sys = system.System()
sys.power_on()
|
richgieg/RichEmu86
|
main.py
|
Python
|
mit
| 98 | 0 |
"""
Pairwise distance functions between time series in a RKHS
=========================================================
They all have the following prototype:
function(K, T1, T2, **kwargs)
"""
import numpy as np
from scipy.linalg import solve, eigvals, inv
from scipy.signal import correlate2d
# mean-element-based ----------------------------------------------------------
def distance_mean_elements(K, T1, T2):
""" Compute the squared distance between mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
Returns
-------
dme2: double,
squared distance between the mean-elements in RKHS
"""
dme2 = K[:T1, :T1].mean()
dme2 += K[T1:, T1:].mean()
dme2 += -2.0 * K[:T1, T1:].mean()
# # normalization vector
# m = np.zeros((T1+T2, 1), dtype=np.double)
# m[:T1,:] = -1./T1
# m[T1:,:] = 1./T2
# # return the distance
# dme2 = np.dot(m.T, np.dot(K, m))[0,0]
return dme2
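# Hypothetical usage sketch (not part of the original module): build the joint
# (T1+T2) x (T1+T2) Gram matrix with a plain Gaussian/RBF base kernel in numpy,
# then evaluate the mean-element distance. Sizes, seed and bandwidth are
# arbitrary assumptions.
def _example_distance_mean_elements(gamma=0.5):
    rng = np.random.RandomState(0)
    X1 = rng.randn(20, 3)            # time series 1: T1=20 frames, 3 features
    X2 = rng.randn(30, 3) + 1.0      # time series 2: T2=30 frames, shifted mean
    Z = np.vstack([X1, X2])
    sq = np.sum(Z ** 2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0 * np.dot(Z, Z.T)  # pairwise squared distances
    K = np.exp(-gamma * d2)                                 # joint RBF Gram matrix
    return distance_mean_elements(K, X1.shape[0], X2.shape[0])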
def distance_me_squared(K, T1, T2):
""" Compute the squared distance between the squared mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
Returns
-------
dme2: double,
squared HS distance between the mean-elements squared
"""
dme2 = (K[:T1, :T1].mean()) ** 2
dme2 += (K[T1:, T1:].mean()) ** 2
dme2 += -2.0 * (K[:T1, T1:].mean()) ** 2
return dme2
def distance_mahalanobis(K, T1, T2, regul=1e-3):
""" Compute the squared distance between mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
regul: double, optional, default: 1e-3,
regularization parameter
Returns
-------
dmpc2: double,
squared Mahalanobis distance between time-series in RKHS
"""
# normalization vector
n = T1 + T2
m = np.zeros((n, 1), dtype=np.double)
m[:T1, :] = -1.0 / T1
m[T1:, :] = 1.0 / T2
# centering matrix
PiT1 = np.eye(T1, dtype=np.double) - 1.0 / T1
PiT2 = np.eye(T2, dtype=np.double) - 1.0 / T2
N = np.vstack([np.hstack([PiT1, np.zeros((T1, T2), dtype=np.double)]),
np.hstack([np.zeros((T2, T1), dtype=np.double), PiT2])])
# compute the distance
mTK = np.dot(m.T, K)
me = np.dot(mTK, m) # difference between mean elements
mTKN = np.dot(mTK, N)
NTK = np.dot(N.T, K)
A = regul * np.eye(n) + 1.0 / n * np.dot(NTK, N)
AinvNTK = solve(A, NTK, overwrite_a=True) # A^{-1} N.T K
AinvNTKm = np.dot(AinvNTK, m)
dmpc2 = 1.0 / regul * (me - 1.0 / n * np.dot(mTKN, AinvNTKm))
return dmpc2[0, 0]
# alignment-based -------------------------------------------------------------
def distance_aligned_frames_truncated(K, T1, T2, tau=0):
""" Compute the squared distance between aligned frames
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 0,
temporal shift (in frames) to apply to time series 2 before computing
alignment, using "cyclic" padding
Returns
-------
dme2: double,
squared distance between aligned frames in the RKHS
Notes
-----
    Truncated version (equivalent to zero padding)
dme2 = K[0,0] - 1/(T2-tau) * sum_{t=0}^{T2-tau} K[x1_t, x2_{t+tau}]
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# constant base kernel value k(x,x)
c = K[0, 0]
# matrix of k(x,y)
Kxy = K[:T, T:]
# return the distance
return c - np.mean(np.diag(Kxy, k=tau))
def distance_aligned_frames_cyclic(K, T1, T2, tau=0):
""" Compute the squared distance between aligned frames
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: positive int, optional, default: 0,
temporal shift (in frames) to apply to time series 2 before computing
alignment, using "cyclic" padding
Returns
-------
dme2: double,
squared distance between aligned frames in the RKHS
Notes
-----
    Cyclic version
dme2 = K[0,0] - 1/T2 * sum_{t=0}^{T2} K[x1_t, x2_{(t+tau) % T2}]
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# constant base kernel value k(x,x)
c = K[0, 0]
# matrix of k(x,y)
Kxy = K[:T, T:]
# return the distance
if tau:
tr = Kxy.trace(offset=tau) + Kxy.trace(offset=tau - T)
else:
tr = Kxy.trace()
return c - tr / float(T)
# auto-covariance-based -------------------------------------------------------
def distance_hsac_truncated(K, T1, T2, tau=1):
""" Compute the squared HS distance between the autocovariance operators of
two time series
|| \\scov^{(y)}_{\\tau} - \\scov^{(x)}_{\\tau} ||_{HS}^2 =
1/T**2 ( Tr(K_1 x K_1^\\tau) + Tr(K_2 x K_2^\\tau) - 2 Tr(K_{1,2} x K_{2,1}^\\tau ) )
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
    tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
Returns
-------
dhsac: double,
squared Hilbert-Schmidt norm of the difference between the
auto-covariance operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Truncated version between X[:-tau] and X[tau:] (equivalent to zero padding).
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the truncated matrices of the non-shifted series
K1 = K[:T1 - tau, :T1 - tau]
K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
K12 = K[:T1 - tau, T1:T1 + T2 - tau]
# define the truncated matrices of the shifted series
K1tau = K[tau:T1, tau:T1]
K2tau = K[T1 + tau:, T1 + tau:]
K12tau = K[tau:T1, T1 + tau:]
# compute the different traces using Hadamard products (and sym of K)
tr1 = np.mean(K1 * K1tau)
tr2 = np.mean(K2 * K2tau)
tr12 = np.mean(K12 * K12tau) # no transpose (K21tau.T == K12tau)
# return dhsac
return tr1 + tr2 - 2 * tr12
def distance_hsac_cyclic(K, T1, T2, tau=1):
""" Compute the squared HS distance between the autocovariance operators of
two time series
|| \\scov^{(y)}_{\\tau} - \\scov^{(x)}_{\\tau} ||_{HS}^2 =
1/T**2 ( Tr(K_1 x K_1^\\tau) + Tr(K_2 x K_2^\\tau) - 2 Tr(K_{1,2} x K_{2,1}^\\tau ) )
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
    tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
Returns
-------
dhsac: double,
squared Hilbert-Schmidt norm of the difference between the
auto-covariance operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Cyclic version between X and [ X[tau:], X[:tau] ].
Artefacts may arise if the two series were not synchronized and comprised
of the same number of periods.
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the (non-truncated) matrices of the non-shifted series
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# circular permutation of tau frames
idxs1 = np.arange(tau, T1 + tau) % T1
idxs2 = np.arange(tau, T2 + tau) % T2
# Note: no need for copy as we re-use the previous centering (indep. of frame order)
K1tau = K1[np.ix_(idxs1, idxs1)]
K2tau = K2[np.ix_(idxs2, idxs2)]
K12tau = K12[np.ix_(idxs1, idxs2)]
# compute the different traces using Hadamard products (and sym of K)
tr1 = np.mean(K1 * K1tau)
tr2 = np.mean(K2 * K2tau)
tr12 = np.mean(K12 * K12tau) # no transpose (K21tau.T == K12tau)
# return dhsac
return tr1 + tr2 - 2 * tr12
# TODO use incomplete Cholesky decomposition (ST & C chap. 6, p. 175)
def hsnorm_cross_correlation(K, T1, T2, regul=1e-3):
""" Compute the squared Hilbert-Schmidt norm of the cross-correlation
This *similarity* measures the strength of the cross-correlation between
two series, i.e. the degree to which you can linearly (in feature space!)
predict one knowing the other (0 => not linked).
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
regul: double, optional, default: 1e-3,
regularization parameter
Returns
-------
hscorr: double,
squared Hilbert-Schmidt norm of the cross-correlation operator
between time series 1 and 2, in the RKHS induced by a base kernel
Notes
-----
This is computed as a trace by solving a generalized eigenvalue problem
equivalent to the one appearing in kernel CCA.
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# define the gram matrices of the series
K1 = K[:T, :T]
K2 = K[T:, T:]
# build right-hand-side symetric matrix of the gen. eigenvalue problem
A = np.zeros(K.shape)
K1_K2 = np.dot(K1, K2)
A[:T, T:] = K1_K2 # upper triangular part
A[T:, :T] = K1_K2.T # lower triangular part (symetric)
# build left-hand-side symetric matrix of the gen. eigenvalue problem
B = np.zeros(K.shape)
B[:T, :T] = (1.0 - regul) * np.dot(K1, K1) + regul * K1
B[T:, T:] = (1.0 - regul) * np.dot(K2, K2) + regul * K2
# get the eigen-values (w) of Av = wBv (generalized eigenvalue problem)
tr = float(np.mean(eigvals(A, B, overwrite_a=True)))
return tr
def distance_autocor_truncated(K, T1, T2, tau=1, regul=1e-3):
""" Compute the squared HS distance between the autocorrelation operators of
two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
dacor: double,
squared Hilbert-Schmidt norm of the difference between the
auto-correlation operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Truncated version.
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the truncated matrices of the non-shifted series
K1 = K[:T1 - tau, :T1 - tau]
K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
K12 = K[:T1 - tau, T1:T1 + T2 - tau]
# define the truncated matrices of the shifted series
K1tau = K[tau:T1, tau:T1]
K2tau = K[T1 + tau:, T1 + tau:]
K12tau = K[tau:T1, T1 + tau:]
# compute the different terms
N1 = regul * np.eye(T1 - tau) - solve(
(T1 - tau) * np.eye(T1 - tau) + 1.0 / regul * K1, K1, sym_pos=True)
N2 = regul * np.eye(T2 - tau) - solve(
(T2 - tau) * np.eye(T2 - tau) + 1.0 / regul * K2, K2, sym_pos=True)
KK1 = np.dot(np.dot(N1.T, K1), np.dot(N1, K1tau))
KK2 = np.dot(np.dot(N2.T, K2), np.dot(N2, K2tau))
KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
# compute the different traces
tr1 = 1.0 / ((regul ** 4) * (T1 - tau) ** 2) * KK1.trace()
tr2 = 1.0 / ((regul ** 4) * (T2 - tau) ** 2) * KK2.trace()
tr12 = 1.0 / ((regul ** 4) * (T1 - tau) * (T2 - tau)) * KK12.trace()
return tr1 + tr2 - 2.0 * tr12
def distance_autocor_cyclic(K, T1, T2, tau=1, regul=1e-3):
""" Compute the squared HS distance between the autocorrelation operators of
two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
dacor: double,
squared Hilbert-Schmidt norm of the difference between the
auto-correlation operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Cyclic version.
"""
# define per-series tau
if tau < 0.5:
# tau as a fraction of series length
tau1 = max(1, int(T1 * tau + 0.5))
tau2 = max(1, int(T2 * tau + 0.5))
elif 1 <= tau < min(T1 / 2.0, T2 / 2.0):
# constant tau: same for each series
tau1 = tau2 = int(tau)
else:
raise ValueError("Too big tau")
# define the (non-truncated) matrices of the non-shifted series
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# circular permutation of tau frames
idxs1 = np.arange(tau1, T1 + tau1) % T1
idxs2 = np.arange(tau2, T2 + tau2) % T2
# Note: no need for copy as we re-use the previous centering (indep. of frame order)
K1tau = K1[np.ix_(idxs1, idxs1)]
K2tau = K2[np.ix_(idxs2, idxs2)]
K12tau = K12[np.ix_(idxs1, idxs2)]
# compute the different terms
N1 = regul * np.eye(T1) - solve(
T1 * np.eye(T1) + 1.0 / regul * K1, K1, sym_pos=True)
N2 = regul * np.eye(T2) - solve(
T2 * np.eye(T2) + 1.0 / regul * K2, K2, sym_pos=True)
KK1 = np.dot(np.dot(N1.T, K1), np.dot(N1, K1tau))
KK2 = np.dot(np.dot(N2.T, K2), np.dot(N2, K2tau))
KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
# compute the different traces
tr1 = 1.0 / ((regul ** 4) * T1 ** 2) * KK1.trace()
tr2 = 1.0 / ((regul ** 4) * T2 ** 2) * KK2.trace()
tr12 = 1.0 / ((regul ** 4) * T1 * T2) * KK12.trace()
# TODO: check if more efficient to use Hadamard products?
return tr1 + tr2 - 2.0 * tr12
def hsdotprod_autocor_truncated(K, T1, T2, tau=1, regul=1e-3):
""" Compute the Hilbert-Schmidt inner-product between the autocorrelation
operators of two time series (**similarity**, not a distance)
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
hsdp: double,
Hilbert-Schmidt inner product between the auto-correlation operators,
in the RKHS induced by 'frame_kern', of the two time series
Notes
-----
Truncated version.
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the truncated matrices of the non-shifted series
K1 = K[:T1 - tau, :T1 - tau]
K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
K12 = K[:T1 - tau, T1:T1 + T2 - tau]
# define the truncated matrices of the shifted series
K12tau = K[tau:T1, T1 + tau:]
# compute the different terms
N1 = regul * np.eye(T1 - tau) - solve(
(T1 - tau) * np.eye(T1 - tau) + 1.0 / regul * K1, K1, sym_pos=True)
N2 = regul * np.eye(T2 - tau) - solve(
(T2 - tau) * np.eye(T2 - tau) + 1.0 / regul * K2, K2, sym_pos=True)
KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
# compute the trace
hsdp = 1.0 / ((regul ** 4) * (T1 - tau) * (T2 - tau)) * KK12.trace()
return hsdp
def hsdotprod_autocor_cyclic(K, T1, T2, tau=1, regul=1e-3):
""" Compute the Hilbert-Schmidt inner-product between the autocorrelation
operators of two time series (**similarity**, not a distance)
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
lag, ie time shift used in the auto-covariance computation
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
hsdp: double,
Hilbert-Schmidt inner product between the auto-correlation operators,
in the RKHS induced by 'frame_kern', of the two time series
Notes
-----
Cyclic version.
"""
# define per-series tau
if tau < 0.5:
            # tau as a fraction of series length
tau1 = max(1, int(T1 * tau + 0.5))
tau2 = max(1, int(T2 * tau + 0.5))
elif 1 <= tau < min(T1 / 2.0, T2 / 2.0):
# constant tau: same for each series
tau1 = tau2 = int(tau)
else:
raise ValueError("Too big tau")
# define the (non-truncated) matrices of the non-shifted series
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# circular permutation of tau frames
idxs1 = np.arange(tau1, T1 + tau1) % T1
idxs2 = np.arange(tau2, T2 + tau2) % T2
# Note: no need for copy as we re-use the previous centering (indep. of frame order)
K12tau = K12[np.ix_(idxs1, idxs2)]
# compute the different terms
N1 = regul * np.eye(T1) - solve(
T1 * np.eye(T1) + 1.0 / regul * K1, K1, sym_pos=True)
N2 = regul * np.eye(T2) - solve(
T2 * np.eye(T2) + 1.0 / regul * K2, K2, sym_pos=True)
KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
# compute the trace
hsdp = 1.0 / ((regul ** 4) * T1 * T2) * KK12.trace()
return hsdp
# auto-regressive-model-based -------------------------------------------------
def distance_predictive_codings(K, T1, T2, tau=1, regul=1e-3):
""" Compute the squared HS distance between the parameters of AR(p) models
(in feature space) of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
order of the AR models (use tau past frames)
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
dpc: double,
squared Hilbert-Schmidt norm of the difference between the AR(p) models
learned by kernel ridge regression in the RKHS induced by 'frame_kern'
"""
p = int(tau)
assert 1 <= p < min(T1 / 2.0, T2 / 2.0), \
"Too big p (p=%d >= %d or %d)" % (p, T1 / 2.0, T2 / 2.0)
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# compute the convolutions
Ip = np.eye(p)
S1 = correlate2d(K1[:-1, :-1], Ip, mode='valid')
S2 = correlate2d(K2[:-1, :-1], Ip, mode='valid')
S21 = correlate2d(K12.T[:-1, :-1], Ip, mode='valid')
# compute the inverses
# TODO: rewrite formula better (to replace inv with solve and convolutions by products?)
Q1 = inv(regul * np.eye(T1 - p) + S1)
Q2 = inv(regul * np.eye(T2 - p) + S2)
# compute the product terms
P1 = np.dot(np.dot(Q1, K1[p:, p:]), np.dot(Q1, S1))
P2 = np.dot(np.dot(Q2, K2[p:, p:]), np.dot(Q2, S2))
P12 = np.dot(np.dot(Q1, K12[p:, p:]), np.dot(Q2, S21))
# compute the different traces
return 1.0 / T1 * P1.trace() + 1.0 / T2 * P2.trace() - 2.0 / T1 * P12.trace()
def distance_dual_predictive_codings(K, T1, T2, tau=1, regul=1e-3):
""" Compute the squared HS distance between the dual parameters of AR(p)
models (in feature space) of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix (assumed to be centered!)
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 1
order of the AR models (use tau past frames)
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
Returns
-------
ddpc: double,
squared Hilbert-Schmidt norm of the difference between the dual
parameters of AR(p) models learned by kernel ridge regression in the
RKHS induced by 'frame_kern'
"""
p = int(tau)
assert 1 <= p < min(T1 / 2.0, T2 / 2.0), \
"Too big p (p=%d >= %d or %d)" % (p, T1 / 2.0, T2 / 2.0)
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# compute the convolutions
Ip = np.eye(p)
S1 = correlate2d(K1[:-1, :-1], Ip, mode='valid')
S2 = correlate2d(K2[:-1, :-1], Ip, mode='valid')
# compute the inverses
# XXX incomplete Cholesky would be better but is 3x slower...
Q1 = inv(regul * np.eye(T1 - p) + S1)
Q2 = inv(regul * np.eye(T2 - p) + S2)
# compute the product terms
P1 = np.dot(np.dot(Q1, K1[p:, p:]), Q1)
P2 = np.dot(np.dot(Q2, K2[p:, p:]), Q2)
P12 = np.dot(np.dot(Q1, K12[p:, p:]), Q2)
# compute the different traces
return 1.0 / T1 * P1.trace() + 1.0 / T2 * P2.trace() - 2.0 / T1 * P12.trace()
# FOR DEBUG PURPOSES
def distance_hsac_decomp(K, T1, T2, tau=1, mode="truncated"):
""" Return the components 1/T**2 * (tr1, tr2, tr12) of HSAC
mode {"truncated"/"cyclic"} defines way to compute HSAC
"""
assert mode in ["truncated", "cyclic"], "Unknown HSAC mode (%s)" % mode
assert T1 == T2, "the series should be of same duration"
assert tau <= T1 / 2.0, "Too big tau"
T = T1
if mode == "truncated":
# define the truncated matrices of the non-shifted series
K1 = K[:T - tau, :T - tau]
K2 = K[T:T + T - tau, T:T + T - tau]
K12 = K[:T - tau, T:T + T - tau]
# define the truncated matrices of the shifted series
K1tau = K[tau:T, tau:T]
K2tau = K[T + tau:, T + tau:]
K12tau = K[tau:T, T + tau:]
# normalization factor
nzf = 1.0 / ((T - tau) * (T - tau))
elif mode == "cyclic":
# define the (non-truncated) matrices of the non-shifted series
K1 = K[:T, :T]
K2 = K[T:, T:]
K12 = K[:T, T:]
# circular permutation of tau frames
idxs = np.arange(tau, T + tau) % T
# indexes used to make the permuted views of the kernel matrix
perm_slice = np.ix_(idxs, idxs)
# Note: no need for copy as we re-use the previous centering (indep. of frame order)
K1tau = K1[perm_slice]
K2tau = K2[perm_slice]
K12tau = K12[perm_slice]
# normalization factor
nzf = 1.0 / (T * T)
# compute the different traces using Hadamard products
tr1 = nzf * (K1 * K1tau.T).sum()
tr2 = nzf * (K2 * K2tau.T).sum()
tr12 = nzf * (K12 * K12tau.T).sum() # do not forget the transpose!
return (tr1, tr2, tr12)
def _get_centered_gram(kern_mat, is_sym=True):
""" Center (NOT in place) the Gram (kernel) matrix in the feature space
Mathematical operation: K <- PKP where P = eye(n) - 1/n ones((n,n))
Parameters
----------
kern_mat: (n,n) symmetric positve semi-definite kernel matrix
is_sym: boolean (default: True), assume the matrix is symmetric
Returns
-------
cmat: the centered gram matrix
"""
# number of rows and cols
nr, nc = kern_mat.shape
assert not is_sym or nr == nc, "Matrix cannot be symmetric if not square!"
# mean of the columns of the original matrix (as (nc,) row vector)
cms = np.mean(kern_mat, 0).reshape((1, nc))
# mean of the rows (as (nr,1) column vector)
if is_sym:
rms = cms.reshape((nr, 1))
else:
rms = np.mean(kern_mat, 1).reshape((nr, 1))
# mean of the means over columns
mcm = np.mean(cms) # precomputed once for efficiency
# return the centered matrix (using array broadcasting)
return kern_mat + mcm - cms - rms
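# Hypothetical sanity check (not part of the original module): after centering,
# every row and column of the Gram matrix should average to ~0. The size and
# tolerance below are arbitrary choices.
def _check_centering(n=10, tol=1e-10):
    rng = np.random.RandomState(1)
    A = rng.randn(n, n)
    K = np.dot(A, A.T)                 # symmetric PSD Gram matrix
    Kc = _get_centered_gram(K)
    return (np.abs(Kc.mean(axis=0)).max() < tol and
            np.abs(Kc.mean(axis=1)).max() < tol)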
|
daien/daco
|
distances_rkhs.py
|
Python
|
mit
| 24,666 | 0.000568 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <shadowapex@gmail.com>,
# Benjamin Bean <superman2k5@gmail.com>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# Benjamin Bean <superman2k5@gmail.com>
# Leif Theden <leif.theden@gmail.com>
#
#
# core.states.combat Combat Start module
#
#
from __future__ import division
import logging
from collections import namedtuple, defaultdict
from functools import partial
from itertools import chain
from operator import attrgetter
import pygame
from core import tools, state
from core.components.locale import translator
from core.components.pyganim import PygAnimation
from core.components.sprite import Sprite
from core.components.technique import Technique
from core.components.ui.draw import GraphicBox
from core.components.ui.text import TextArea
from .combat_animations import CombatAnimations
trans = translator.translate
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
EnqueuedAction = namedtuple("EnqueuedAction", "user technique target")
faint = Technique("status_faint")
def check_status(monster, status_name):
return any(t for t in monster.status if t.slug == status_name)
def fainted(monster):
return check_status(monster, "status_faint")
def get_awake_monsters(player):
""" Iterate all non-fainted monsters in party
:param player:
:return:
"""
for monster in player.monsters:
if not fainted(monster):
yield monster
def fainted_party(party):
return all(map(fainted, party))
def defeated(player):
return fainted_party(player.monsters)
class WaitForInputState(state.State):
""" Just wait for input blocking everything
"""
def process_event(self, event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.game.pop_state(self)
class CombatState(CombatAnimations):
""" The state-menu responsible for all combat related tasks and functions.
.. image:: images/combat/monster_drawing01.png
General description of this class:
* implements a simple state machine
* various phases are executed using a queue of actions
* "decision queue" is used to queue player interactions/menus
* this class holds mostly logic, though some graphical functions exist
* most graphical functions are contained in "CombatAnimations" class
Currently, status icons are implemented as follows:
each round, all status icons are destroyed
status icons are created for each status on each monster
obvs, not ideal, maybe someday make it better? (see transition_phase)
"""
background_filename = "gfx/ui/combat/battle_bg03.png"
draw_borders = False
escape_key_exits = False
def startup(self, **kwargs):
        self.max_positions = 1  # TODO: make dependent on match type
self.phase = None
self.monsters_in_play = defaultdict(list)
self._damage_map = defaultdict(set) # track damage so experience can be awarded later
self._technique_cache = dict() # cache for technique animations
self._decision_queue = list() # queue for monsters that need decisions
self._position_queue = list() # queue for asking players to add a monster into play (subject to change)
self._action_queue = list() # queue for techniques, items, and status effects
self._status_icons = list() # list of sprites that are status icons
self._monster_sprite_map = dict() # monster => sprite
self._hp_bars = dict() # monster => hp bar
self._layout = dict() # player => home areas on screen
self._animation_in_progress = False # if true, delay phase change
self._winner = None # when set, combat ends
self._round = 0
super(CombatState, self).startup(**kwargs)
self.players = list(self.players)
self.show_combat_dialog()
self.transition_phase("begin")
self.task(partial(setattr, self, "phase", "ready"), 3)
def update(self, time_delta):
""" Update the combat state. State machine is checked.
General operation:
* determine what phase to execute
* if new phase, then run transition into new one
* update the new phase, or the current one
"""
super(CombatState, self).update(time_delta)
if not self._animation_in_progress:
new_phase = self.determine_phase(self.phase)
if new_phase:
self.phase = new_phase
self.transition_phase(new_phase)
self.update_phase()
def draw(self, surface):
super(CombatState, self).draw(surface)
self.draw_hp_bars()
def draw_hp_bars(self):
""" Go through the HP bars and redraw them
:returns: None
"""
for monster, hud in self.hud.items():
rect = pygame.Rect(0, 0, tools.scale(70), tools.scale(8))
rect.right = hud.image.get_width() - tools.scale(8)
rect.top += tools.scale(12)
self._hp_bars[monster].draw(hud.image, rect)
def determine_phase(self, phase):
""" Determine the next phase and set it
Part of state machine
Only test and set new phase.
* Do not execute phase actions
* Try not to modify any values
* Return a phase name and phase will change
* Return None and phase will not change
:returns: None or String
"""
if phase == "ready":
return "housekeeping phase"
elif phase == "housekeeping phase":
# this will wait for players to fill battleground positions
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
return
return "decision phase"
elif phase == "decision phase":
# assume each monster executes one action
# if number of actions == monsters, then all monsters are ready
if len(self._action_queue) == len(self.active_monsters):
return "pre action phase"
# TODO: change check so that it doesn't change state
# (state is changed because check_match_status will modify _winner)
# if a player runs, it will be known here
self.determine_winner()
if self._winner:
return "ran away"
elif phase == "pre action phase":
return "action phase"
if phase == "action phase":
if not self._action_queue:
return "post action phase"
elif phase == "post action phase":
if not self._action_queue:
return "resolve match"
elif phase == "ran away":
return "end combat"
elif phase == "has winner":
return "end combat"
elif phase == "resolve match":
if self._winner:
return "has winner"
else:
return "housekeeping phase"
def transition_phase(self, phase):
""" Change from one phase from another.
Part of state machine
* Will be run just -once- when phase changes.
* Do not change phase.
* Execute code only to change into new phase.
* The phase's update will be executed -after- this
:param phase:
:return:
"""
if phase == "housekeeping phase":
self._round += 1
# fill all battlefield positions, but on round 1, don't ask
self.fill_battlefield_positions(ask=self._round > 1)
if phase == "decision phase":
self.reset_status_icons()
if not self._decision_queue:
for player in self.human_players:
# the decision queue tracks human players who need to choose an
# action
self._decision_queue.extend(self.monsters_in_play[player])
for trainer in self.ai_players:
for monster in self.monsters_in_play[trainer]:
opponents = self.monsters_in_play[self.players[0]]
action, target = monster.ai.make_decision(monster, opponents)
self.enqueue_action(monster, action, target)
elif phase == "action phase":
self._action_queue.sort(key=attrgetter("user.speed"))
# TODO: Running happens somewhere else, it should be moved here i think.
# TODO: Sort other items not just healing, Swap/Run?
#Create a new list for items, possibly running/swap
#sort items by speed of monster applied to
#remove items from action_queue and insert them into their new location
precedent = []
for action in self._action_queue:
if action.technique.effect == 'heal':
precedent.append(action)
#sort items by fastest target
precedent.sort(key=attrgetter("target.speed"))
for action in precedent:
self._action_queue.remove(action)
self._action_queue.insert(0,action)
elif phase == "post action phase":
# apply status effects to the monsters
for monster in self.active_monsters:
for technique in monster.status:
self.enqueue_action(None, technique, monster)
elif phase == "resolve match":
self.determine_winner()
elif phase == "ran away":
            # after a short delay, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
# if you run in PvP, you need "defeated message"
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "has winner":
if self._winner:
# TODO: proper match check, etc
if self._winner.name == "Maple":
self.alert(trans('combat_defeat'))
else:
self.alert(trans('combat_victory'))
                # after a short delay, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "end combat":
self.end_combat()
def update_phase(self):
""" Execute/update phase actions
Part of state machine
* Do not change phase.
* Will be run each iteration phase is active.
* Do not test conditions to change phase.
:return: None
"""
if self.phase == "decision phase":
# show monster action menu for human players
if self._decision_queue:
monster = self._decision_queue.pop()
self.show_monster_action_menu(monster)
elif self.phase == "action phase":
self.handle_action_queue()
elif self.phase == "post action phase":
self.handle_action_queue()
def handle_action_queue(self):
""" Take one action from the queue and do it
:return: None
"""
if self._action_queue:
action = self._action_queue.pop()
self.perform_action(*action)
self.check_party_hp()
self.task(self.animate_party_status, 3)
def ask_player_for_monster(self, player):
""" Open dialog to allow player to choose a TXMN to enter into play
:param player:
:return:
"""
def add(menuitem):
monster = menuitem.game_object
if monster.current_hp == 0:
tools.open_dialog(self.game, [trans("combat_fainted", parameters={"name":monster.name})])
elif monster in self.active_monsters:
tools.open_dialog(self.game, [trans("combat_isactive", parameters={"name":monster.name})])
msg = trans("combat_replacement_is_fainted")
tools.open_dialog(self.game, [msg])
else:
self.add_monster_into_play(player, monster)
self.game.pop_state()
state = self.game.push_state("MonsterMenuState")
# must use a partial because alert relies on a text box that may not exist
        # until after the state has been started up
state.task(partial(state.alert, trans("combat_replacement")), 0)
state.on_menu_selection = add
def fill_battlefield_positions(self, ask=False):
""" Check the battlefield for unfilled positions and send out monsters
:param ask: bool. if True, then open dialog for human players
:return:
"""
# TODO: let work for trainer battles
humans = list(self.human_players)
# TODO: integrate some values for different match types
released = False
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
available = get_awake_monsters(player)
for i in range(positions_available):
released = True
if player in humans and ask:
self.ask_player_for_monster(player)
else:
self.add_monster_into_play(player, next(available))
if released:
self.suppress_phase_change()
def add_monster_into_play(self, player, monster):
"""
:param player:
:param monster:
:return:
"""
# TODO: refactor some into the combat animations
feet = list(self._layout[player]['home'][0].center)
feet[1] += tools.scale(11)
self.animate_monster_release_bottom(feet, monster)
self.build_hud(self._layout[player]['hud'][0], monster)
self.monsters_in_play[player].append(monster)
# TODO: not hardcode
if player is self.players[0]:
self.alert(trans('combat_call_tuxemon', {"name": monster.name.upper()}))
else:
self.alert(trans('combat_wild_appeared', {"name": monster.name.upper()}))
def reset_status_icons(self):
""" Update/reset status icons for monsters
TODO: caching, etc
"""
# remove all status icons
for s in self._status_icons:
self.sprites.remove(s)
# add status icons
for monster in self.active_monsters:
for status in monster.status:
if status.icon:
# get the rect of the monster
rect = self._monster_sprite_map[monster].rect
# load the sprite and add it to the display
self.load_sprite(status.icon, layer=200, center=rect.topleft)
def show_combat_dialog(self):
""" Create and show the area where battle messages are displayed
"""
# make the border and area at the bottom of the screen for messages
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w, h // 4)
rect.bottomright = w, h
border = tools.load_and_scale(self.borders_filename)
self.dialog_box = GraphicBox(border, None, self.background_color)
self.dialog_box.rect = rect
self.sprites.add(self.dialog_box, layer=100)
# make a text area to show messages
self.text_area = TextArea(self.font, self.font_color)
self.text_area.rect = self.dialog_box.calc_inner_rect(self.dialog_box.rect)
self.sprites.add(self.text_area, layer=100)
def show_monster_action_menu(self, monster):
""" Show the main window for choosing player actions
:param monster: Monster to choose an action for
:type monster: core.components.monster.Monster
:returns: None
"""
message = trans('combat_monster_choice', {"name": monster.name})
self.alert(message)
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w // 2.5, h // 4)
rect.bottomright = w, h
state = self.game.push_state("MainCombatMenuState", columns=2)
state.monster = monster
state.rect = rect
def skip_phase_change(self):
""" Skip phase change animations
Useful if player wants to skip a battle animation
"""
for ani in self.animations:
ani.finish()
def enqueue_action(self, user, technique, target=None):
""" Add some technique or status to the action queue
:param user:
:param technique:
:param target:
:returns: None
"""
self._action_queue.append(EnqueuedAction(user, technique, target))
def remove_monster_actions_from_queue(self, monster):
""" Remove all queued actions for a particular monster
This is used mainly for removing actions after monster is fainted
:type monster: core.components.monster.Monster
:returns: None
"""
to_remove = set()
for action in self._action_queue:
if action.user is monster or action.target is monster:
to_remove.add(action)
        for action in to_remove:
            self._action_queue.remove(action)
def suppress_phase_change(self, delay=3):
""" Prevent the combat phase from changing for a limited time
Use this function to prevent the phase from changing. When
animating elements of the phase, call this to prevent player
input as well as phase changes.
:param delay:
:return:
"""
if self._animation_in_progress:
logger.debug("double suppress: bug?")
else:
self._animation_in_progress = True
self.task(partial(setattr, self, "_animation_in_progress", False), delay)
def perform_action(self, user, technique, target=None):
""" Do something with the thing: animated
:param user:
:param technique: Not a dict: a Technique or Item
:param target:
:returns:
"""
technique.advance_round()
# This is the time, in seconds, that the animation takes to finish.
action_time = 3.0
result = technique.use(user, target)
try:
tools.load_sound(technique.sfx).play()
except AttributeError:
pass
# action is performed, so now use sprites to animate it
# this value will be None if the target is off screen
target_sprite = self._monster_sprite_map.get(target, None)
# slightly delay the monster shake, so technique animation
# is synchronized with the damage shake motion
hit_delay = 0
if user:
message = trans('combat_used_x', {"user": user.name, "name": technique.name})
# TODO: a real check or some params to test if should tackle, etc
if result["should_tackle"]:
hit_delay += .5
user_sprite = self._monster_sprite_map[user]
self.animate_sprite_tackle(user_sprite)
if target_sprite:
self.task(partial(self.animate_sprite_take_damage, target_sprite), hit_delay + .2)
self.task(partial(self.blink, target_sprite), hit_delay + .6)
# Track damage
self._damage_map[target].add(user)
else: # assume this was an item used
if result["name"] == "capture":
message += "\n" + trans('attempting_capture')
self.task(partial(self.animate_capture_monster, result["success"], result["num_shakes"], target))
action_time = result["num_shakes"] + 1.8
if result["success"]: # end combat right here
self.task(self.end_combat, action_time + 0.5) # Display 'Gotcha!' first.
self.task(partial(self.alert, trans('gotcha')), action_time)
self.alert(message)
self._animation_in_progress = True
return
if result["success"]:
message += "\n" + trans('item_success')
else:
message += "\n" + trans('item_failure')
self.alert(message)
self.suppress_phase_change(action_time)
else:
if result["success"]:
self.suppress_phase_change()
self.alert(trans('combat_status_damage', {"name": target.name, "status": technique.name}))
if result["success"] and target_sprite and hasattr(technique, "images"):
tech_sprite = self.get_technique_animation(technique)
tech_sprite.rect.center = target_sprite.rect.center
self.task(tech_sprite.image.play, hit_delay)
self.task(partial(self.sprites.add, tech_sprite, layer=50), hit_delay)
self.task(tech_sprite.kill, 3)
def faint_monster(self, monster):
""" Instantly make the monster faint (will be removed later)
:type monster: core.components.monster.Monster
:returns: None
"""
monster.current_hp = 0
monster.status = [faint]
"""
Experience is earned when the target monster is fainted.
Any monsters who contributed any amount of damage will be awarded
Experience is distributed evenly to all participants
"""
if monster in self._damage_map:
# Award Experience
awarded_exp = monster.total_experience / monster.level / len(self._damage_map[monster])
for winners in self._damage_map[monster]:
winners.give_experience(awarded_exp)
# Remove monster from damage map
del self._damage_map[monster]
def animate_party_status(self):
""" Animate monsters that need to be fainted
* Animation to remove monster is handled here
TODO: check for faint status, not HP
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
if fainted(monster):
self.alert(trans('combat_fainted', {"name": monster.name}))
self.animate_monster_faint(monster)
self.suppress_phase_change(3)
def check_party_hp(self):
""" Apply status effects, then check HP, and party status
* Monsters will be removed from play here
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
self.animate_hp(monster)
if monster.current_hp <= 0 and not fainted(monster):
self.remove_monster_actions_from_queue(monster)
self.faint_monster(monster)
def get_technique_animation(self, technique):
""" Return a sprite usable as a technique animation
TODO: move to some generic animation loading thingy
:type technique: core.components.technique.Technique
:rtype: core.components.sprite.Sprite
"""
try:
return self._technique_cache[technique]
except KeyError:
sprite = self.load_technique_animation(technique)
self._technique_cache[technique] = sprite
return sprite
@staticmethod
def load_technique_animation(technique):
"""
TODO: move to some generic animation loading thingy
:param technique:
:rtype: core.components.sprite.Sprite
"""
frame_time = .09
images = list()
for fn in technique.images:
image = tools.load_and_scale(fn)
images.append((image, frame_time))
tech = PygAnimation(images, False)
sprite = Sprite()
sprite.image = tech
sprite.rect = tech.get_rect()
return sprite
@property
def active_players(self):
""" Generator of any non-defeated players/trainers
:rtype: collections.Iterable[core.components.player.Player]
"""
for player in self.players:
if not defeated(player):
yield player
@property
def human_players(self):
for player in self.players:
if player.isplayer:
yield player
@property
def ai_players(self):
for player in set(self.active_players) - set(self.human_players):
yield player
@property
def active_monsters(self):
""" List of any non-defeated monsters on battlefield
:rtype: list
"""
return list(chain.from_iterable(self.monsters_in_play.values()))
def remove_player(self, player):
# TODO: non SP things
self.players.remove(player)
self.suppress_phase_change()
self.alert(trans('combat_player_run'))
def determine_winner(self):
""" Determine if match should continue or not
:return:
"""
if self._winner:
return
players = list(self.active_players)
if len(players) == 1:
self._winner = players[0]
def end_combat(self):
""" End the combat
"""
# TODO: End combat differently depending on winning or losing
# clear action queue
self._action_queue = list()
event_engine = self.game.event_engine
        fadeout_action = namedtuple("action", ["type", "parameters"])(
            type="fadeout_music", parameters=[1000])
event_engine.actions["fadeout_music"]["method"](self.game, fadeout_action)
# remove any menus that may be on top of the combat state
while self.game.current_state is not self:
self.game.pop_state()
self.game.push_state("FadeOutTransition", caller=self)
|
nikitakurylev/TuxemonX
|
tuxemon/core/states/combat/combat.py
|
Python
|
gpl-3.0
| 27,084 | 0.001625 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Liblbxutil(AutotoolsPackage):
"""liblbxutil - Low Bandwith X extension (LBX) utility routines."""
homepage = "http://cgit.freedesktop.org/xorg/lib/liblbxutil"
url = "https://www.x.org/archive/individual/lib/liblbxutil-1.1.0.tar.gz"
version('1.1.0', '2735cd23625d4cc870ec4eb7ca272788')
depends_on('xextproto@7.0.99.1:', type='build')
depends_on('xproto', type='build')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('util-macros', type='build')
# There is a bug in the library that causes the following messages:
# undefined symbol: Xfree
# undefined symbol: Xalloc
# See https://bugs.freedesktop.org/show_bug.cgi?id=8421
# Adding a dependency on libxdmcp and adding LIBS=-lXdmcp did not fix it
|
skosukhin/spack
|
var/spack/repos/builtin/packages/liblbxutil/package.py
|
Python
|
lgpl-2.1
| 2,028 | 0.000986 |
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
# do a login here
# self.client.post("/login", {"username":"ellen_key", "password":"education"})
pass
@task(2)
def index(self):
self.client.get("/")
@task(1)
def project1(self):
self.client.get("/app/category/featured/")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
    min_wait = 5000
    max_wait = 9000
|
PyBossa/pybossa-locust
|
mainandprojects.py
|
Python
|
agpl-3.0
| 600 | 0.011667 |
from __future__ import absolute_import
import redisext.backend.abc
import rm.rmredis
class Client(redisext.backend.abc.IClient):
def __init__(self, database=None, role=None):
self._redis = rm.rmredis.RmRedis.get_instance(database, role)
class Connection(redisext.backend.abc.IConnection):
CLIENT = Client
|
mylokin/redisext
|
redisext/backend/rmredis.py
|
Python
|
mit
| 326 | 0 |
mystring="40523116"
mystring=mystring +" test"
print(mystring)
|
s40523116/2016fallcp_hw
|
w4.py
|
Python
|
agpl-3.0
| 66 | 0.045455 |
import pyotherside
from firebase import firebase
firebase = firebase.FirebaseApplication('https://hacker-news.firebaseio.com', None)
responses = []
getItemsCount = 0
eventCount = 0
itemIDs = []
def getCommentsForItem(itemID):
itemID = int(itemID)
itemID = str(itemID)
item = firebase.get('/v0/item/'+itemID, None)
# print(item)
# print(item['kids'])
for commentID in item['kids']:
# print("inside loop. kid:", commentID)
commentID = str(commentID)
comment = firebase.get_async('/v0/item/'+commentID, None, callback=cbNewComment)
def getItems(items, startID=None, count=None):
if items is None and startID is None and count is None:
return
currentlyDownloading(True)
global getItemsCount
global itemIDs
items = str(items)
if count is None:
getItemsCount = 30
else:
getItemsCount = count
if startID is None:
itemIDs = firebase.get('/v0/'+items, None)
if len(itemIDs) < getItemsCount:
getItemsCount = len(itemIDs)
else:
allIDs = firebase.get('/v0/'+items, None)
startIDFound = False
for i in allIDs:
if i == startID:
startIDFound = True
continue
if startIDFound is False:
continue
itemIDs.append(i)
if len(itemIDs) >= getItemsCount:
break
if len(itemIDs) == 0:
resetDownloader()
currentlyDownloading(False)
return
if len(itemIDs) < getItemsCount:
getItemsCount = len(itemIDs)
itemID = None
i = 0
for itemID in itemIDs:
itemID = str(itemID)
item = firebase.get_async('/v0/item/'+itemID, None, callback=cbNewItem)
i += 1
if i >= getItemsCount:
break
def cbNewItem(response):
global eventCount
eventCount += 1
pyotherside.send('item-downloaded')
bufferResponse(response)
def cbNewComment(response):
# print("cbNewComment", response)
pyotherside.send('comment-downloaded', response)
def bufferResponse(response):
global getItemsCount
global eventCount
global itemIDs
global responses
responses.append(response)
# print(eventCount, getItemsCount)
if eventCount == getItemsCount:
orderedResponses = []
# print(itemIDs)
for r in responses:
index = itemIDs.index(r['id'])
if index is None:
continue
orderedResponses.insert(index, r)
sendResponses(orderedResponses)
def sendResponses(responses):
for r in responses:
pyotherside.send('new-item', r)
resetDownloader()
currentlyDownloading(False)
def resetDownloader():
global eventCount
global itemIDs
global responses
global getItemsCount
eventCount = 0
itemIDs[:] = []
responses[:] = []
getItemsCount = 0
def currentlyDownloading(b):
pyotherside.send('items-currently-downloading', b)
|
neversun/sailfish-hackernews
|
src/main.py
|
Python
|
mit
| 3,028 | 0.000661 |
"""
Semiconducting Materials from Analogy and Chemical Theory
A collection of fast screening tools from elemental data
"""
# get correct path for datafiles when called from another directory
from builtins import filter
from builtins import map
from builtins import range
from builtins import object
from os import path
module_directory = path.abspath(path.dirname(__file__))
data_directory = path.join(module_directory, 'data')
import itertools
from math import gcd
from operator import mul as multiply
from smact import data_loader
class Element(object):
"""Collection of standard elemental properties for given element.
Data is drawn from "data/element.txt", part of the Open Babel
package.
Atoms with a defined oxidation state draw properties from the
"Species" class.
Attributes:
Element.symbol (string) : Elemental symbol used to retrieve data
Element.name (string) : Full name of element
Element.number (int) : Proton number of element
Element.pauling_eneg (float) : Pauling electronegativity (0.0 if unknown)
Element.ionpot (float) : Ionisation potential in eV (0.0 if unknown)
Element.e_affinity (float) : Electron affinity in eV (0.0 if unknown)
Element.dipol (float) : Static dipole polarizability in 1.6488e-41 C m^2 / V (0.0 if unknown)
Element.eig (float) : Electron eigenvalue (units unknown) N.B. For Cu, Au and Ag this defaults to d-orbital
Element.eig_s (float) : Eigenvalue of s-orbital
Element.SSE (float) : Solid State Energy
Element.SSEPauling (float) : SSE based on regression fit with Pauling electronegativity
Element.oxidation_states (list) : Default list of allowed oxidation states for use in SMACT
        Element.oxidation_states_sp (list) : List of oxidation states recognised by the Pymatgen Structure Predictor
        Element.oxidation_states_icsd (list) : List of oxidation states that appear in the ICSD
        Element.coord_envs (list): The allowed coordination environments for the ion
Element.covalent_radius (float) : Covalent radius of the element
Element.mass (float) : Molar mass of the element
Element.crustal_abundance (float) : Crustal abundance in the earths crust mg/kg taken from CRC
Element.HHI_p (float) : Herfindahl-Hirschman Index for elemental production
Element.HHI_r (float) : Hirfindahl-Hirschman Index for elemental reserves
Raises:
NameError: Element not found in element.txt
Warning: Element not found in Eigenvalues.csv
"""
def __init__(self, symbol):
"""Initialise Element class
Args:
symbol (str): Chemical element symbol (e.g. 'Fe')
"""
dataset = data_loader.lookup_element_data(symbol, copy=False)
        if dataset is None:
raise NameError("Elemental data for {0} not found.".format(symbol))
# Set coordination-environment data from the Shannon-radius data.
# As above, it is safe to use copy = False with this Get* function.
shannon_data = data_loader.lookup_element_shannon_radius_data(symbol, copy=False)
        if shannon_data is not None:
coord_envs = [row['coordination'] for row in shannon_data]
else:
coord_envs = None
HHI_scores = data_loader.lookup_element_hhis(symbol)
        if HHI_scores is None:
HHI_scores = (None, None)
sse_data = data_loader.lookup_element_sse_data(symbol)
if sse_data:
sse = sse_data['SolidStateEnergy']
else:
sse = None
sse_Pauling_data = data_loader.lookup_element_sse_pauling_data(symbol)
if sse_Pauling_data:
sse_Pauling = sse_Pauling_data['SolidStateEnergyPauling']
else:
sse_Pauling = None
for attribute, value in (
('coord_envs', coord_envs),
('covalent_radius', dataset['r_cov']),
('crustal_abundance', dataset['Abundance']),
('e_affinity', dataset['e_affinity']),
('eig', dataset['p_eig']),
('eig_s', dataset['s_eig']),
('HHI_p', HHI_scores[0]),
('HHI_r', HHI_scores[1]),
('ionpot', dataset['ion_pot']),
('mass', dataset['Mass']),
('name', dataset['Name']),
('number', dataset['Z']),
('oxidation_states',
data_loader.lookup_element_oxidation_states(symbol)),
('oxidation_states_icsd',
data_loader.lookup_element_oxidation_states_icsd(symbol)),
('oxidation_states_sp',
data_loader.lookup_element_oxidation_states_sp(symbol)),
('dipol', dataset['dipol']),
('pauling_eneg', dataset['el_neg']),
('SSE', sse),
('SSEPauling', sse_Pauling),
('symbol', symbol),
#('vdw_radius', dataset['RVdW']),
):
setattr(self, attribute, value)
class Species(Element):
"""
Class providing data for elements in a given chemical environment
In addition to the standard properties from the periodic table
(inherited from the Element class), Species objects use the
oxidation state and coordination environment to provide further
properties.
Attributes:
Species.symbol: Elemental symbol used to retrieve data
Species.name: Full name of element
Species.oxidation: Oxidation state of species (signed integer)
Species.coordination: Coordination number of species (integer)
Species.pauling_eneg: Pauling electronegativity (0.0 if unknown)
Species.ionpot: Ionisation potential in eV (0.0 if unknown)
Species.e_affinity: Electron affinity in eV (0.0 if unknown)
Species.eig: Electron eigenvalue (units unknown)
N.B. For Cu, Au and Ag this defaults to d-orbital.
Species.shannon_radius: Shannon radius of Species.
Species.ionic_radius: Ionic radius of Species.
Raises:
NameError: Element not found in element.txt
Warning: Element not found in Eigenvalues.csv
"""
def __init__(self,symbol,oxidation,coordination=4, radii_source="shannon"):
Element.__init__(self,symbol)
self.oxidation = oxidation
self.coordination = coordination
        # Get shannon radius for the oxidation state and coordination.
        self.shannon_radius = None
        if radii_source == "shannon":
            shannon_data = data_loader.lookup_element_shannon_radius_data(symbol)
        elif radii_source == "extended":
            shannon_data = data_loader.lookup_element_shannon_radius_data_extendedML(symbol)
        else:
            shannon_data = None
            print("Data source not recognised. Please select 'shannon' or 'extended'.")
        if shannon_data:
            for dataset in shannon_data:
                if dataset['charge'] == oxidation and str(coordination) == dataset['coordination'].split('_')[0]:
                    self.shannon_radius = dataset['crystal_radius']
        # Get ionic radius
        self.ionic_radius = None
        if shannon_data:
            for dataset in shannon_data:
                if dataset['charge'] == oxidation and str(coordination) == dataset['coordination'].split('_')[0]:
                    self.ionic_radius = dataset['ionic_radius']
# Get SSE_2015 (revised) for the oxidation state.
self.SSE_2015 = None
        sse_2015_data = data_loader.lookup_element_sse2015_data(symbol)
if sse_2015_data:
for dataset in sse_2015_data:
if dataset['OxidationState'] == oxidation:
self.SSE_2015 = dataset['SolidStateEnergy2015']
else:
self.SSE_2015 = None
def ordered_elements(x,y):
"""
Return a list of element symbols, ordered by proton number in the range x -> y
Args:
x,y : integers
Returns:
list: Ordered list of element symbols
"""
with open(path.join(data_directory,
'ordered_periodic.txt'), 'r') as f:
data = f.readlines()
elements = []
for line in data:
inp = line.split()
elements.append(inp[0])
ordered_elements = []
for i in range(x,y+1):
ordered_elements.append(elements[i-1])
return ordered_elements
def element_dictionary(elements=None):
"""
Create a dictionary of initialised smact.Element objects
Accessing an Element from a dict is significantly faster than
repeadedly initialising them on-demand within nested loops.
Args:
elements (iterable of strings) : Elements to include. If None,
use all elements up to 103.
Returns:
dict: Dictionary with element symbols as keys and smact.Element
objects as data
"""
    if elements is None:
elements = ordered_elements(1,103)
return {symbol: Element(symbol) for symbol in elements}
def are_eq(A,B,tolerance=1e-4):
"""Check two arrays for tolerance [1,2,3]==[1,2,3]; but [1,3,2]!=[1,2,3]
Args:
A, B (lists): 1-D list of values for approximate equality comparison
tolerance: numerical precision for equality condition
Returns:
boolean
"""
are_eq = True
if len(A) != len(B):
are_eq = False
else:
i = 0
while i < len(A):
if abs(A[i] - B[i]) > tolerance:
are_eq = False
i = i + 1
return are_eq
def lattices_are_same(lattice1, lattice2, tolerance=1e-4):
"""Checks for the equivalence of two lattices
Args:
lattice1,lattice2 : ASE crystal class
Returns:
boolean
"""
lattices_are_same = False
i = 0
for site1 in lattice1:
for site2 in lattice2:
if site1.symbol == site2.symbol:
if are_eq(site1.position,
site2.position,
tolerance=tolerance):
i += 1
if i == len(lattice1):
lattices_are_same = True
return lattices_are_same
def _gcd_recursive(*args):
"""
Get the greatest common denominator among any number of ints
"""
if len(args) == 2:
return gcd(*args)
else:
return gcd(args[0], _gcd_recursive(*args[1:]))
def _isneutral(oxidations, stoichs):
"""
Check if set of oxidation states is neutral in given stoichiometry
Args:
oxidations (tuple): Oxidation states of a set of oxidised elements
stoichs (tuple): Stoichiometry values corresponding to `oxidations`
"""
return 0 == sum(map(multiply, oxidations, stoichs))
def neutral_ratios_iter(oxidations, stoichs=False, threshold=5):
"""
Iterator for charge-neutral stoichiometries
Given a list of oxidation states of arbitrary length, yield ratios in which
these form a charge-neutral compound. Stoichiometries may be provided as a
set of legal stoichiometries per site (e.g. a known family of compounds);
otherwise all unique ratios are tried up to a threshold coefficient.
Args:
oxidations : list of integers
stoichs : stoichiometric ratios for each site (if provided)
threshold : single threshold to go up to if stoichs are not provided
Yields:
tuple: ratio that gives neutrality
"""
if not stoichs:
stoichs = [list(range(1,threshold+1))] * len(oxidations)
# First filter: remove combinations which have a common denominator
# greater than 1 (i.e. Use simplest form of each set of ratios)
# Second filter: return only charge-neutral combinations
return filter(
lambda x: _isneutral(oxidations, x) and _gcd_recursive(*x) == 1,
# Generator: enumerate all combinations of stoichiometry
itertools.product(*stoichs)
)
def neutral_ratios(oxidations, stoichs=False, threshold=5):
"""
Get a list of charge-neutral compounds
Given a list of oxidation states of arbitrary length, yield ratios in which
these form a charge-neutral compound. Stoichiometries may be provided as a
set of legal stoichiometries per site (e.g. a known family of compounds);
otherwise all unique ratios are tried up to a threshold coefficient.
Given a list of oxidation states of arbitrary length it searches for
neutral ratios in a given ratio of sites (stoichs) or up to a given
threshold.
Args:
oxidations (list of ints): Oxidation state of each site
stoichs (list of positive ints): A selection of valid stoichiometric
ratios for each site
threshold (int): Maximum stoichiometry coefficient; if no 'stoichs'
argument is provided, all combinations of integer coefficients up
to this value will be tried.
Returns:
(exists, allowed_ratios) (tuple):
exists *bool*:
                True if any ratio exists, otherwise False
allowed_ratios *list of tuples*:
Ratios of atoms in given oxidation
states which yield a charge-neutral structure
"""
allowed_ratios = [x for x in neutral_ratios_iter(oxidations,
stoichs=stoichs,
threshold=threshold)]
return (len(allowed_ratios) > 0, allowed_ratios)
# List of metals
metals = ['Li','Be','Na','Mg','Al','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni',
'Cu','Zn','Ga','Ge','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb',
'Cs','Ba','La','Ce', 'Pr','Nd','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
'Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','Fr','Ra','Ac',
'Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No']
# List of elements that can be considered 'anions'.
# Similar to the Pymatgen 'electronegative elements' but excluding H, B, C & Si.
anions = ["N", "P", "As", "Sb",
"O", "S", "Se", "Te",
"F", "Cl", "Br", "I"]
# List of d-block metals
d_block = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn',
'Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd',
'La','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg']
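# Minimal usage sketch for the charge-neutrality search above (illustrative,
# hypothetical values): an M(+3)/X(-2) pair has exactly one coprime neutral
# ratio up to the default threshold of 5, namely 2:3 (an M2X3 formula).
if __name__ == "__main__":
    exists, ratios = neutral_ratios([+3, -2])
    print(exists, ratios)  # expected: True [(2, 3)]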
|
WMD-group/SMACT
|
smact/__init__.py
|
Python
|
mit
| 14,679 | 0.011309 |
#!/usr/bin/env python
import sys
from os.path import normpath, join, dirname, abspath
machine_file = normpath(join(dirname(abspath(__file__)),
'../files/machine-images.csv'))
def read_machine_file():
amis = {}
with open(machine_file) as fp:
for l in fp:
type, region, ami = l[:-1].split(',')
amis[type + ':' + region] = ami
return amis
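# The CSV is assumed to hold one "type,region,ami" record per line, e.g. a
# hypothetical entry: default,ap-southeast-2,ami-0123456789abcdef0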
def write_machine_file(amis):
with open(machine_file, 'w') as fp:
for k in sorted(amis.keys()):
type, region = k.split(':')
print('{},{},{}'.format(type, region, amis[k]), file=fp)
def get_ami(type, region):
return read_machine_file().get(type + ':' + region)
def set_ami(type, region, ami):
amis = read_machine_file()
amis[type + ':' + region] = ami
write_machine_file(amis)
def main(argv):
if len(argv) == 3:
print(get_ami(argv[1], argv[2]))
elif len(argv) == 4:
set_ami(argv[1], argv[2], argv[3])
else:
print("""
Usage:
Get AMI ami.py <type> <region>
Save AMI ami.py <type> <region> <ami>
""")
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
|
Cadasta/cadasta-platform
|
deployment/scripts/ami.py
|
Python
|
agpl-3.0
| 1,182 | 0.001692 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_str,
float_or_none,
int_or_none,
smuggle_url,
str_or_none,
try_get,
)
class STVPlayerIE(InfoExtractor):
IE_NAME = 'stv:player'
_VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
_TESTS = [{
# shortform
'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
'md5': '5adf9439c31d554f8be0707c7abe7e0a',
'info_dict': {
'id': '5333973339001',
'ext': 'mp4',
'upload_date': '20170301',
'title': '60 seconds on set with Laura Norton',
'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let\'s find out!",
'timestamp': 1488388054,
'uploader_id': '1486976045',
},
'skip': 'this resource is unavailable outside of the UK',
}, {
# episodes
'url': 'https://player.stv.tv/episode/4125/jennifer-saunders-memory-lane',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
_PTYPE_MAP = {
'episode': 'episodes',
'video': 'shortform',
}
def _real_extract(self, url):
ptype, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id, fatal=False) or ''
props = (self._parse_json(self._search_regex(
r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
webpage, 'next data', default='{}'), video_id,
fatal=False) or {}).get('props') or {}
player_api_cache = try_get(
props, lambda x: x['initialReduxState']['playerApiCache']) or {}
api_path, resp = None, {}
for k, v in player_api_cache.items():
if k.startswith('/episodes/') or k.startswith('/shortform/'):
api_path, resp = k, v
break
else:
episode_id = str_or_none(try_get(
props, lambda x: x['pageProps']['episodeId']))
api_path = '/%s/%s' % (self._PTYPE_MAP[ptype], episode_id or video_id)
result = resp.get('results')
if not result:
resp = self._download_json(
'https://player.api.stv.tv/v1' + api_path, video_id)
result = resp['results']
video = result['video']
video_id = compat_str(video['id'])
subtitles = {}
_subtitles = result.get('_subtitles') or {}
for ext, sub_url in _subtitles.items():
subtitles.setdefault('en', []).append({
'ext': 'vtt' if ext == 'webvtt' else ext,
'url': sub_url,
})
programme = result.get('programme') or {}
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['GB']}),
'description': result.get('summary'),
'duration': float_or_none(video.get('length'), 1000),
'subtitles': subtitles,
'view_count': int_or_none(result.get('views')),
'series': programme.get('name') or programme.get('shortName'),
'ie_key': 'BrightcoveNew',
}
|
rg3/youtube-dl
|
youtube_dl/extractor/stv.py
|
Python
|
unlicense
| 3,447 | 0.002031 |
import random
from django.test import TestCase
from .models import TechType, Tech, Component
# shared data across the tests
types = [
{
"name": "framework",
"description": "to do web stuff"
},
{
"name": "database",
"description": "to store stuff"
},
{
"name": "application",
"description": "to show stuff"
},
{
"name": "balancer",
"description": "to balance stuff"
},
{
"name": "web server",
"description": "to serve stuff"
},
{
"name": "programming language",
"description": "to programm stuff"
},
]
techs = [
{
"name": "django",
"description": "The best web framework",
"url": "https://www.djangoproject.com/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/django/django.git",
},
{
"name": "python",
"description": "The programming language",
"url": "http://www.python.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "http://hg.python.org/cpython/",
},
{
"name": "golang",
"description": "The other programming language",
"url": "http://www.golang.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://code.google.com/p/go",
},
{
"name": "postgresql",
"description": "The best relational database",
"url": "http://www.postgresql.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/postgres/postgres",
},
{
"name": "nginx",
"description": "The best http server",
"url": "http://nginx.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "http://hg.nginx.org/nginx",
},
{
"name": "sharestack",
"description": "The best app",
"url": "http://sharestack.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/sharestack/sharestack",
},
]
components = [
{
"name": "sharestack",
"version": "1.0",
"config": '{"json-config": "yeah"}',
"description": "This is a description of a versioned app",
},
{
"name": "nginx",
"version": "1.5.11",
"config": 'nginx config big string',
"description": "This is a description of a versioned nginx",
},
{
"name": "postgresql",
"version": "9.3",
"config": 'postgres config',
"description": "This is a description of a versioned postgres",
},
{
"name": "python",
"version": "2.7.6",
"config": 'python config',
"description": "This is a description of a versioned python",
},
]
class TechTypeTests(TestCase):
def setUp(self):
pass
def test_save(self):
for i in types:
t = TechType(**i)
t.save()
self.assertEqual(len(TechType.objects.all()), len(types))
def test_retrieval(self):
for i in types:
t = TechType(**i)
t.save()
t2 = TechType.objects.get(id=t.id)
self.assertEqual(t, t2)
def test_filter(self):
for i in types:
t = TechType(**i)
t.save()
tech_type = types[random.randrange(len(types))]
t = TechType.objects.filter(name=tech_type["name"])[0]
self.assertEqual(t.description, tech_type["description"])
def test_str(self):
for i in types:
t = TechType(**i)
self.assertEqual(str(t), i["name"])
class TechTests(TestCase):
def test_save(self):
for i in techs:
t = Tech(**i)
t.save()
self.assertEqual(len(Tech.objects.all()), len(techs))
def test_retrieval(self):
for i in techs:
t = Tech(**i)
t.save()
t2 = Tech.objects.get(id=t.id)
self.assertEqual(t, t2)
def test_filter(self):
for i in techs:
t = Tech(**i)
t.save()
tech = techs[random.randrange(len(techs))]
t = Tech.objects.filter(name=tech["name"])[0]
self.assertEqual(t.url, tech["url"])
def test_related_fields(self):
# Create types
programming_lang = TechType(**types[5]) # Is the 6th
app = TechType(**types[2]) # Is the 3rd
framework = TechType(**types[0]) # Is the 1st
database = TechType(**types[1]) # Is the 2nd
# Create techs
python = Tech(**techs[1]) # Is the 2nd
go = Tech(**techs[2]) # Is the 3rd
django = Tech(**techs[0]) # Is the 1st
sharestack = Tech(**techs[-1]) # Is the last
postgres = Tech(**techs[3]) # Is the 4th
# Save before adding m2m fields
programming_lang.save()
app.save()
framework.save()
database.save()
# save remaining
python.save()
go.save()
django.save()
sharestack.save()
postgres.save()
# add types
python.types.add(programming_lang)
go.types.add(programming_lang)
django.types.add(framework)
sharestack.types.add(app)
postgres.types.add(database)
# Check types are ok in both sides for programmign languages
python2 = Tech.objects.get(name=python.name)
go2 = Tech.objects.get(name=go.name)
programming_lang2 = TechType.objects.get(name=programming_lang.name)
self.assertEqual(python2.types.all()[0], programming_lang2)
self.assertEqual(go2.types.all()[0], programming_lang2)
self.assertEqual(len(programming_lang2.tech_set.all()), 2)
# Check tech componente are ok in one side with sharestack
django.tech_components.add(python)
sharestack.tech_components.add(python, django, postgres)
self.assertEqual(len(sharestack.tech_components.all()), 3)
# Isn't symmetrical so postgres shouldn't have sharestack
self.assertEqual(len(postgres.tech_components.all()), 0)
def test_str(self):
for i in techs:
t = Tech(**i)
self.assertEqual(str(t), i["name"])
class ComponentTests(TestCase):
def setUp(self):
# Save first all the types
self.tech_objects = {}
for i in techs:
t = Tech(**i)
t.save()
self.tech_objects[i["name"]] = t
def test_save(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
self.assertEqual(len(Component.objects.all()), len(components))
def test_retrieval(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
c2 = Component.objects.get(id=c.id)
self.assertEqual(c, c2)
def test_filter(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
component = components[random.randrange(len(components))]
c = Component.objects.filter(name=component["name"])[0]
self.assertEqual(c.version, component["version"])
def test_related_fields(self):
# Create 2 components and add the same tech to each.
django_tech = Tech.objects.get(name="django")
sharestack = Component(**components[0])
sharestack.tech = django_tech
nginx = Component(**components[1])
nginx.tech = django_tech
sharestack.save()
nginx.save()
# Check the tech has the 2 components
t = Tech.objects.get(name=django_tech.name)
self.assertEqual(len(t.component_set.all()), 2)
def test_str(self):
for i in components:
t = Component(**i)
self.assertEqual(str(t), i["name"])
|
sharestack/sharestack-api
|
sharestackapi/techs/test_models.py
|
Python
|
mit
| 8,113 | 0 |
"""
.. _ex-morph-surface:
=============================
Morph surface source estimate
=============================
This example demonstrates how to morph an individual subject's
:class:`mne.SourceEstimate` to a common reference space. We achieve this using
:class:`mne.SourceMorph`. Pre-computed data will be morphed based on
a spherical representation of the cortex computed using the spherical
registration of :ref:`FreeSurfer <tut-freesurfer>`
(https://surfer.nmr.mgh.harvard.edu/fswiki/SurfaceRegAndTemplates) [1]_. This
transform will be used to morph the surface vertices of the subject towards the
reference vertices. Here we will use 'fsaverage' as a reference space (see
https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage).
The transformation will be applied to the surface source estimate. A plot
depicting the successful morph will be created for the spherical and inflated
surface representation of ``'fsaverage'``, overlaid with the morphed surface
source estimate.
References
----------
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
.. note:: For background information about morphing see :ref:`ch_morph`.
"""
# Author: Tommy Clausner <tommy.clausner@gmail.com>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
print(__doc__)
###############################################################################
# Setup paths
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')
fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')
###############################################################################
# Load example data
# Read stc from file
stc = mne.read_source_estimate(fname_stc, subject='sample')
###############################################################################
# Setting up SourceMorph for SourceEstimate
# -----------------------------------------
#
# In MNE surface source estimates represent the source space simply as
# lists of vertices (see
# :ref:`tut-source-estimate-class`).
# This list can either be obtained from
# :class:`mne.SourceSpaces` (src) or from the ``stc`` itself.
#
# Since the default ``spacing`` (resolution of surface mesh) is ``5`` and
# ``subject_to`` is set to 'fsaverage', :class:`mne.SourceMorph` will use
# default ico-5 ``fsaverage`` vertices to morph, which are the special
# values ``[np.arange(10242)] * 2``.
#
# .. note:: This is not generally true for other subjects! The set of vertices
# used for ``fsaverage`` with ico-5 spacing was designed to be
# special. ico-5 spacings for other subjects (or other spacings
# for fsaverage) must be calculated and will not be consecutive
# integers.
#
# If src was not defined, the morph will actually not be precomputed, because
# we lack the vertices *from* that we want to compute. Instead the morph will
# be set up and when applying it, the actual transformation will be computed on
# the fly.
#
# Initialize SourceMorph for SourceEstimate
morph = mne.compute_source_morph(stc, subject_from='sample',
subject_to='fsaverage',
subjects_dir=subjects_dir)
###############################################################################
# Apply morph to (Vector) SourceEstimate
# --------------------------------------
#
# The morph will be applied to the source estimate data, by giving it as the
# first argument to the morph we computed above.
stc_fsaverage = morph.apply(stc)
###############################################################################
# Plot results
# ------------
# Define plotting parameters
surfer_kwargs = dict(
hemi='lh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=0.09, time_unit='s', size=(800, 800),
smoothing_steps=5)
# As spherical surface
brain = stc_fsaverage.plot(surface='sphere', **surfer_kwargs)
# Add title
brain.add_text(0.1, 0.9, 'Morphed to fsaverage (spherical)', 'title',
font_size=16)
###############################################################################
# As inflated surface
brain_inf = stc_fsaverage.plot(surface='inflated', **surfer_kwargs)
# Add title
brain_inf.add_text(0.1, 0.9, 'Morphed to fsaverage (inflated)', 'title',
font_size=16)
###############################################################################
# Reading and writing SourceMorph from and to disk
# ------------------------------------------------
#
# An instance of SourceMorph can be saved, by calling
# :meth:`morph.save <mne.SourceMorph.save>`.
#
# This method allows for specification of a filename under which the ``morph``
# will be save in ".h5" format. If no file extension is provided, "-morph.h5"
# will be appended to the respective defined filename::
#
# >>> morph.save('my-file-name')
#
# Reading a saved source morph can be achieved by using
# :func:`mne.read_source_morph`::
#
# >>> morph = mne.read_source_morph('my-file-name-morph.h5')
#
# Once the environment is set up correctly, no information such as
# ``subject_from`` or ``subjects_dir`` must be provided, since it can be
# inferred from the data and use morph to 'fsaverage' by default. SourceMorph
# can further be used without creating an instance and assigning it to a
# variable. Instead :func:`mne.compute_source_morph` and
# :meth:`mne.SourceMorph.apply` can be
# easily chained into a handy one-liner. Taking this together the shortest
# possible way to morph data directly would be:
stc_fsaverage = mne.compute_source_morph(stc,
subjects_dir=subjects_dir).apply(stc)
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/e414d894f3f4079b3e5897dd9c691af7/plot_morph_surface_stc.py
|
Python
|
bsd-3-clause
| 5,938 | 0 |
# Copyright(c) 2009, Gentoo Foundation
#
# Licensed under the GNU General Public License, v2
#
# $Header: $
"""Checks timestamps and MD5 sums for files owned by a given installed package"""
from __future__ import print_function
__docformat__ = 'epytext'
# =======
# Imports
# =======
import os
import sys
from functools import partial
from getopt import gnu_getopt, GetoptError
import portage.checksum as checksum
import gentoolkit.pprinter as pp
from gentoolkit import errors
from gentoolkit.equery import format_options, mod_usage, CONFIG
from gentoolkit.query import Query
# =======
# Globals
# =======
QUERY_OPTS = {
"in_installed": True,
"in_porttree": False,
"in_overlay": False,
"check_MD5sum": True,
"check_timestamp" : True,
"is_regex": False,
"only_failures": False,
"show_progress": False,
}
# =======
# Classes
# =======
class VerifyContents(object):
"""Verify installed packages' CONTENTS files.
The CONTENTS file contains timestamps and MD5 sums for each file owned
by a package.
"""
def __init__(self, printer_fn=None):
"""Create a VerifyObjects instance.
@type printer_fn: callable
@param printer_fn: if defined, will be applied to each result as found
"""
self.check_sums = True
self.check_timestamps = True
self.printer_fn = printer_fn
self.is_regex = False
def __call__(
self,
pkgs,
is_regex=False,
check_sums=True,
check_timestamps=True
):
self.is_regex = is_regex
self.check_sums = check_sums
self.check_timestamps = check_timestamps
result = {}
for pkg in pkgs:
# _run_checks returns tuple(n_passed, n_checked, err)
check_results = self._run_checks(pkg.parsed_contents())
result[pkg.cpv] = check_results
if self.printer_fn is not None:
self.printer_fn(pkg.cpv, check_results)
return result
def _run_checks(self, files):
"""Run some basic sanity checks on a package's contents.
If the file type (ftype) is not a directory or symlink, optionally
verify MD5 sums or mtimes via L{self._verify_obj}.
@see: gentoolkit.packages.get_contents()
@type files: dict
@param files: in form {'PATH': ['TYPE', 'TIMESTAMP', 'MD5SUM']}
@rtype: tuple
@return:
n_passed (int): number of files that passed all checks
n_checked (int): number of files checked
errs (list): check errors' descriptions
"""
n_checked = 0
n_passed = 0
errs = []
for cfile in files:
n_checked += 1
ftype = files[cfile][0]
if not os.path.exists(cfile):
errs.append("%s does not exist" % cfile)
continue
elif ftype == "dir":
if not os.path.isdir(cfile):
err = "%(cfile)s exists, but is not a directory"
errs.append(err % locals())
continue
elif ftype == "obj":
obj_errs = self._verify_obj(files, cfile, errs)
if len(obj_errs) > len(errs):
errs = obj_errs[:]
continue
elif ftype == "sym":
target = files[cfile][2].strip()
if not os.path.islink(cfile):
err = "%(cfile)s exists, but is not a symlink"
errs.append(err % locals())
continue
tgt = os.readlink(cfile)
if tgt != target:
err = "%(cfile)s does not point to %(target)s"
errs.append(err % locals())
continue
else:
err = "%(cfile)s has unknown type %(ftype)s"
errs.append(err % locals())
continue
n_passed += 1
return n_passed, n_checked, errs
def _verify_obj(self, files, cfile, errs):
"""Verify the MD5 sum and/or mtime and return any errors."""
obj_errs = errs[:]
if self.check_sums:
md5sum = files[cfile][2]
try:
cur_checksum = checksum.perform_md5(cfile, calc_prelink=1)
except IOError:
err = "Insufficient permissions to read %(cfile)s"
obj_errs.append(err % locals())
return obj_errs
if cur_checksum != md5sum:
err = "%(cfile)s has incorrect MD5sum"
obj_errs.append(err % locals())
return obj_errs
if self.check_timestamps:
mtime = int(files[cfile][1])
st_mtime = int(os.lstat(cfile).st_mtime)
if st_mtime != mtime:
err = (
"%(cfile)s has wrong mtime (is %(st_mtime)d, should be "
"%(mtime)d)"
)
obj_errs.append(err % locals())
return obj_errs
return obj_errs
# =========
# Functions
# =========
def print_help(with_description=True):
"""Print description, usage and a detailed help message.
@type with_description: bool
@param with_description: if true, print module's __doc__ string
"""
if with_description:
print(__doc__.strip())
print()
# Deprecation warning added by djanderson, 12/2008
depwarning = (
"Default action for this module has changed in Gentoolkit 0.3.",
"Use globbing to simulate the old behavior (see man equery).",
"Use '*' to check all installed packages.",
"Use 'foo-bar/*' to filter by category."
)
for line in depwarning:
sys.stderr.write(pp.warn(line))
print()
print(mod_usage(mod_name="check"))
print()
print(pp.command("options"))
print(format_options((
(" -h, --help", "display this help message"),
(" -f, --full-regex", "query is a regular expression"),
(" -o, --only-failures", "only display packages that do not pass"),
)))
def checks_printer(cpv, data, verbose=True, only_failures=False):
"""Output formatted results of pkg file(s) checks"""
seen = []
n_passed, n_checked, errs = data
n_failed = n_checked - n_passed
if only_failures and not n_failed:
return
else:
if verbose:
if not cpv in seen:
pp.uprint("* Checking %s ..." % (pp.emph(str(cpv))))
seen.append(cpv)
else:
pp.uprint("%s:" % cpv, end=' ')
if verbose:
for err in errs:
sys.stderr.write(pp.error(err))
if verbose:
n_passed = pp.number(str(n_passed))
n_checked = pp.number(str(n_checked))
info = " %(n_passed)s out of %(n_checked)s files passed"
print(info % locals())
else:
print("failed(%s)" % n_failed)
def parse_module_options(module_opts):
"""Parse module options and update QUERY_OPTS"""
opts = (x[0] for x in module_opts)
for opt in opts:
if opt in ('-h', '--help'):
print_help()
sys.exit(0)
elif opt in ('-f', '--full-regex'):
QUERY_OPTS['is_regex'] = True
elif opt in ('-o', '--only-failures'):
QUERY_OPTS['only_failures'] = True
def main(input_args):
"""Parse input and run the program"""
short_opts = "hof"
long_opts = ('help', 'only-failures', 'full-regex')
try:
module_opts, queries = gnu_getopt(input_args, short_opts, long_opts)
except GetoptError as err:
sys.stderr.write(pp.error("Module %s" % err))
print()
print_help(with_description=False)
sys.exit(2)
parse_module_options(module_opts)
if not queries:
print_help()
sys.exit(2)
first_run = True
for query in (Query(x, QUERY_OPTS['is_regex']) for x in queries):
if not first_run:
print()
matches = query.smart_find(**QUERY_OPTS)
if not matches:
raise errors.GentoolkitNoMatches(query, in_installed=True)
matches.sort()
printer = partial(
checks_printer,
verbose=CONFIG['verbose'],
only_failures=QUERY_OPTS['only_failures']
)
check = VerifyContents(printer_fn=printer)
check(matches)
first_run = False
# vim: set ts=4 sw=4 tw=79:
|
bacher09/Gentoolkit
|
pym/gentoolkit/equery/check.py
|
Python
|
gpl-2.0
| 7,056 | 0.029337 |
# Test the module type
from test.test_support import verify, vereq, verbose, TestFailed
from types import ModuleType as module
# An uninitialized module has no __dict__ or __name__, and __doc__ is None
foo = module.__new__(module)
verify(foo.__dict__ is None)
try:
s = foo.__name__
except AttributeError:
pass
else:
raise TestFailed, "__name__ = %s" % repr(s)
# __doc__ is None by default in CPython but not in Jython.
# We're not worrying about that now.
#vereq(foo.__doc__, module.__doc__)
try:
foo_dir = dir(foo)
except TypeError:
pass
else:
raise TestFailed, "__dict__ = %s" % repr(foo_dir)
try:
del foo.somename
except AttributeError:
pass
else:
raise TestFailed, "del foo.somename"
try:
del foo.__dict__
except TypeError:
pass
else:
raise TestFailed, "del foo.__dict__"
try:
foo.__dict__ = {}
except TypeError:
pass
else:
raise TestFailed, "foo.__dict__ = {}"
verify(foo.__dict__ is None)
# Regularly initialized module, no docstring
foo = module("foo")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, None)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": None})
# ASCII docstring
foo = module("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc"})
# Unicode docstring
foo = module("foo", u"foodoc\u1234")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, u"foodoc\u1234")
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": u"foodoc\u1234"})
# Reinitialization should not replace the __dict__
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.bar, 42)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc", "bar": 42})
verify(foo.__dict__ is d)
if verbose:
print "All OK"
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_module.py
|
Python
|
epl-1.0
| 1,896 | 0.004747 |
import HTMLParser
import json
from xml.etree import ElementTree
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest, Http404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.cache import cache_page
from django.views.generic import View
from couchdbkit import ResourceConflict
from casexml.apps.case.models import CASE_STATUS_OPEN
from casexml.apps.case.xml import V2
from casexml.apps.phone.fixtures import generator
from corehq.form_processor.utils import should_use_sql_backend
from corehq.form_processor.utils.general import use_sqlite_backend
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import string_to_boolean
from dimagi.utils.web import json_response, get_url_base, json_handler
from touchforms.formplayer.api import DjangoAuth, get_raw_instance, sync_db
from touchforms.formplayer.models import EntrySession
from xml2json.lib import xml2json
from corehq import toggles, privileges
from corehq.apps.accounting.decorators import requires_privilege_for_commcare_user, requires_privilege_with_fallback
from corehq.apps.app_manager.dbaccessors import (
get_latest_build_doc,
get_brief_apps_in_domain,
get_latest_released_app_doc,
get_app_ids_in_domain,
get_current_app,
wrap_app,
)
from corehq.apps.app_manager.exceptions import FormNotFoundException, ModuleNotFoundException
from corehq.apps.app_manager.models import Application, ApplicationBase, RemoteApp
from corehq.apps.app_manager.suite_xml.sections.details import get_instances_for_module
from corehq.apps.app_manager.suite_xml.sections.entries import EntriesHelper
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.api import (
api_closed_to_status,
CaseAPIResult,
get_app_json,
get_filtered_cases,
get_filters_from_request_params,
get_open_form_sessions,
look_up_app_json,
)
from corehq.apps.cloudcare.dbaccessors import get_cloudcare_apps
from corehq.apps.cloudcare.decorators import require_cloudcare_access
from corehq.apps.cloudcare.exceptions import RemoteAppError
from corehq.apps.cloudcare.models import ApplicationAccess
from corehq.apps.cloudcare.touchforms_api import BaseSessionDataHelper, CaseSessionDataHelper
from corehq.apps.domain.decorators import login_and_domain_required, login_or_digest_ex, domain_admin_required
from corehq.apps.groups.models import Group
from corehq.apps.reports.formdetails import readable
from corehq.apps.style.decorators import (
use_datatables,
use_jquery_ui,
)
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.users.views import BaseUserSettingsView
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors, LedgerAccessors
from corehq.form_processor.exceptions import XFormNotFound, CaseNotFound
from corehq.util.quickcache import skippable_quickcache
from corehq.util.xml_utils import indent_xml
from corehq.apps.analytics.tasks import track_clicked_preview_on_hubspot
from corehq.apps.analytics.utils import get_meta
@require_cloudcare_access
def default(request, domain):
return HttpResponseRedirect(reverse('cloudcare_main', args=[domain, '']))
def insufficient_privilege(request, domain, *args, **kwargs):
context = {
'domain': domain,
}
return render(request, "cloudcare/insufficient_privilege.html", context)
class CloudcareMain(View):
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(CloudcareMain, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, urlPath):
try:
preview = string_to_boolean(request.GET.get("preview", "false"))
except ValueError:
# this is typically only set at all if it's intended to be true so this
# is a reasonable default for "something went wrong"
preview = True
app_access = ApplicationAccess.get_by_domain(domain)
accessor = CaseAccessors(domain)
if not preview:
apps = get_cloudcare_apps(domain)
if request.project.use_cloudcare_releases:
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(request.couch_user.username)):
get_cloudcare_app = get_latest_build_doc
else:
get_cloudcare_app = get_latest_released_app_doc
apps = map(
lambda app: get_cloudcare_app(domain, app['_id']),
apps,
)
apps = filter(None, apps)
apps = map(wrap_app, apps)
# convert to json
apps = [get_app_json(app) for app in apps]
else:
# legacy functionality - use the latest build regardless of stars
apps = [get_latest_build_doc(domain, app['_id']) for app in apps]
apps = [get_app_json(ApplicationBase.wrap(app)) for app in apps if app]
else:
# big TODO: write a new apps view for Formplayer, can likely cut most out now
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
apps = get_cloudcare_apps(domain)
else:
apps = get_brief_apps_in_domain(domain)
apps = [get_app_json(app) for app in apps if app and (
isinstance(app, RemoteApp) or app.application_version == V2)]
meta = get_meta(request)
track_clicked_preview_on_hubspot(request.couch_user, request.COOKIES, meta)
# trim out empty apps
apps = filter(lambda app: app, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
def _default_lang():
if apps:
# unfortunately we have to go back to the DB to find this
return Application.get(apps[0]["_id"]).default_language
else:
return "en"
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
def _url_context():
# given a url path, returns potentially the app, parent, and case, if
# they're selected. the front end optimizes with these to avoid excess
# server calls
# there's an annoying dependency between this logic and backbone's
# url routing that seems hard to solve well. this needs to be synced
# with apps.js if anything changes
# for apps anything with "view/app/" works
# for cases it will be:
# "view/:app/:module/:form/case/:case/"
# if there are parent cases, it will be:
# "view/:app/:module/:form/parent/:parent/case/:case/
# could use regex here but this is actually simpler with the potential
# absence of a trailing slash
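            # Worked example (hypothetical ids, not real data): for
            # urlPath == "view/app123/0/1/parent/p456/case/c789/", split becomes
            # ['view', 'app123', '0', '1', 'parent', 'p456', 'case', 'c789', '']
            # and the code below yields app_id='app123', parent_id='p456',
            # case_id='c789'.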
split = urlPath.split('/')
app_id = split[1] if len(split) >= 2 else None
if len(split) >= 5 and split[4] == "parent":
parent_id = split[5]
case_id = split[7] if len(split) >= 7 else None
else:
parent_id = None
case_id = split[5] if len(split) >= 6 else None
app = None
if app_id:
if app_id in [a['_id'] for a in apps]:
app = look_up_app_json(domain, app_id)
else:
messages.info(request, _("That app is no longer valid. Try using the "
"navigation links to select an app."))
if app is None and len(apps) == 1:
app = look_up_app_json(domain, apps[0]['_id'])
def _get_case(domain, case_id):
case = accessor.get_case(case_id)
assert case.domain == domain, "case %s not in %s" % (case_id, domain)
return case.to_api_json()
case = _get_case(domain, case_id) if case_id else None
if parent_id is None and case is not None:
parent_id = case.get('indices', {}).get('parent', {}).get('case_id', None)
parent = _get_case(domain, parent_id) if parent_id else None
return {
"app": app,
"case": case,
"parent": parent
}
context = {
"domain": domain,
"language": language,
"apps": apps,
"apps_raw": apps,
"preview": preview,
"maps_api_key": settings.GMAPS_API_KEY,
"sessions_enabled": request.couch_user.is_commcare_user(),
"use_cloudcare_releases": request.project.use_cloudcare_releases,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
'use_sqlite_backend': use_sqlite_backend(domain),
}
context.update(_url_context())
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
return render(request, "cloudcare/formplayer_home.html", context)
else:
return render(request, "cloudcare/cloudcare_home.html", context)
class FormplayerMain(View):
preview = False
urlname = 'formplayer_main'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerMain, self).dispatch(request, *args, **kwargs)
def fetch_app(self, domain, app_id):
username = self.request.couch_user.username
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(username)):
return get_latest_build_doc(domain, app_id)
else:
return get_latest_released_app_doc(domain, app_id)
def get(self, request, domain):
app_access = ApplicationAccess.get_by_domain(domain)
app_ids = get_app_ids_in_domain(domain)
apps = map(
lambda app_id: self.fetch_app(domain, app_id),
app_ids,
)
apps = filter(None, apps)
apps = filter(lambda app: app['cloudcare_enabled'] or self.preview, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
apps = sorted(apps, key=lambda app: app['name'])
def _default_lang():
try:
return apps[0]['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": apps,
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": False,
"home_url": reverse(self.urlname, args=[domain]),
}
return render(request, "cloudcare/formplayer_home.html", context)
class FormplayerMainPreview(FormplayerMain):
preview = True
urlname = 'formplayer_main_preview'
def fetch_app(self, domain, app_id):
return get_current_app(domain, app_id)
class FormplayerPreviewSingleApp(View):
urlname = 'formplayer_single_app'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerPreviewSingleApp, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, app_id, **kwargs):
app_access = ApplicationAccess.get_by_domain(domain)
app = get_current_app(domain, app_id)
if not app_access.user_can_access_app(request.couch_user, app):
raise Http404()
def _default_lang():
try:
return app['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": [app],
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": True,
"home_url": reverse(self.urlname, args=[domain, app_id]),
}
return render(request, "cloudcare/formplayer_home.html", context)
@login_and_domain_required
@requires_privilege_for_commcare_user(privileges.CLOUDCARE)
def form_context(request, domain, app_id, module_id, form_id):
app = Application.get(app_id)
form_url = '{}{}'.format(
settings.CLOUDCARE_BASE_URL or get_url_base(),
reverse('download_xform', args=[domain, app_id, module_id, form_id])
)
case_id = request.GET.get('case_id')
instance_id = request.GET.get('instance_id')
try:
form = app.get_module(module_id).get_form(form_id)
except (FormNotFoundException, ModuleNotFoundException):
raise Http404()
form_name = form.name.values()[0]
# make the name for the session we will use with the case and form
session_name = u'{app} > {form}'.format(
app=app.name,
form=form_name,
)
if case_id:
case = CaseAccessors(domain).get_case(case_id)
session_name = u'{0} - {1}'.format(session_name, case.name)
root_context = {
'form_url': form_url,
}
if instance_id:
try:
root_context['instance_xml'] = FormAccessors(domain).get_form(instance_id).get_xml()
except XFormNotFound:
raise Http404()
session_extras = {'session_name': session_name, 'app_id': app._id}
session_extras.update(get_cloudcare_session_data(domain, form, request.couch_user))
delegation = request.GET.get('task-list') == 'true'
session_helper = CaseSessionDataHelper(domain, request.couch_user, case_id, app, form, delegation=delegation)
return json_response(session_helper.get_full_context(
root_context,
session_extras
))
cloudcare_api = login_or_digest_ex(allow_cc_users=True)
def get_cases_vary_on(request, domain):
request_params = request.GET
return [
request.couch_user.get_id
if request.couch_user.is_commcare_user() else request_params.get('user_id', ''),
request_params.get('ids_only', 'false'),
request_params.get('case_id', ''),
request_params.get('footprint', 'false'),
request_params.get('closed', 'false'),
json.dumps(get_filters_from_request_params(request_params)),
domain,
]
def get_cases_skip_arg(request, domain):
"""
    When this function returns True, skippable_quickcache will not go to the cache for the result. By default,
    if neither the use_cache nor the ids_only request param is passed, nothing will be cached. The cache is
    always skipped if ids_only is false.
The caching is mainly a hack for touchforms to respond more quickly. Touchforms makes repeated requests to
get the list of case_ids associated with a user.
"""
if not toggles.CLOUDCARE_CACHE.enabled(domain):
return True
request_params = request.GET
return (not string_to_boolean(request_params.get('use_cache', 'false')) or
not string_to_boolean(request_params.get('ids_only', 'false')))
@cloudcare_api
@skippable_quickcache(get_cases_vary_on, get_cases_skip_arg, timeout=240 * 60)
def get_cases(request, domain):
request_params = request.GET
if request.couch_user.is_commcare_user():
user_id = request.couch_user.get_id
else:
user_id = request_params.get("user_id", "")
if not user_id and not request.couch_user.is_web_user():
return HttpResponseBadRequest("Must specify user_id!")
ids_only = string_to_boolean(request_params.get("ids_only", "false"))
case_id = request_params.get("case_id", "")
footprint = string_to_boolean(request_params.get("footprint", "false"))
accessor = CaseAccessors(domain)
if toggles.HSPH_HACK.enabled(domain):
hsph_case_id = request_params.get('hsph_hack', None)
if hsph_case_id != 'None' and hsph_case_id and user_id:
case = accessor.get_case(hsph_case_id)
usercase_id = CommCareUser.get_by_user_id(user_id).get_usercase_id()
usercase = accessor.get_case(usercase_id) if usercase_id else None
return json_response(map(
lambda case: CaseAPIResult(domain=domain, id=case['_id'], couch_doc=case, id_only=ids_only),
filter(None, [case, case.parent, usercase])
))
if case_id and not footprint:
# short circuit everything else and just return the case
# NOTE: this allows any user in the domain to access any case given
# they know its ID, which is slightly different from the previous
# behavior (can only access things you own + footprint). If we want to
# change this contract we would need to update this to check the
# owned case list + footprint
case = accessor.get_case(case_id)
assert case.domain == domain
cases = [CaseAPIResult(domain=domain, id=case_id, couch_doc=case, id_only=ids_only)]
else:
filters = get_filters_from_request_params(request_params)
status = api_closed_to_status(request_params.get('closed', 'false'))
case_type = filters.get('properties/case_type', None)
cases = get_filtered_cases(domain, status=status, case_type=case_type,
user_id=user_id, filters=filters,
footprint=footprint, ids_only=ids_only,
strip_history=True)
return json_response(cases)
@cloudcare_api
def filter_cases(request, domain, app_id, module_id, parent_id=None):
app = Application.get(app_id)
module = app.get_module(module_id)
auth_cookie = request.COOKIES.get('sessionid')
requires_parent_cases = string_to_boolean(request.GET.get('requires_parent_cases', 'false'))
xpath = EntriesHelper.get_filter_xpath(module)
instances = get_instances_for_module(app, module, additional_xpaths=[xpath])
extra_instances = [{'id': inst.id, 'src': inst.src} for inst in instances]
use_formplayer = toggles.USE_FORMPLAYER.enabled(domain)
accessor = CaseAccessors(domain)
# touchforms doesn't like this to be escaped
xpath = HTMLParser.HTMLParser().unescape(xpath)
case_type = module.case_type
if xpath or should_use_sql_backend(domain):
# if we need to do a custom filter, send it to touchforms for processing
additional_filters = {
"properties/case_type": case_type,
"footprint": True
}
helper = BaseSessionDataHelper(domain, request.couch_user)
result = helper.filter_cases(xpath, additional_filters, DjangoAuth(auth_cookie),
extra_instances=extra_instances, use_formplayer=use_formplayer)
if result.get('status', None) == 'error':
code = result.get('code', 500)
message = result.get('message', _("Something went wrong filtering your cases."))
if code == 500:
notify_exception(None, message=message)
return json_response(message, status_code=code)
case_ids = result.get("cases", [])
else:
# otherwise just use our built in api with the defaults
case_ids = [res.id for res in get_filtered_cases(
domain,
status=CASE_STATUS_OPEN,
case_type=case_type,
user_id=request.couch_user._id,
footprint=True,
ids_only=True,
)]
cases = accessor.get_cases(case_ids)
if parent_id:
cases = filter(lambda c: c.parent and c.parent.case_id == parent_id, cases)
# refilter these because we might have accidentally included footprint cases
# in the results from touchforms. this is a little hacky but the easiest
    # (quick) workaround. should be revisited when we optimize the case list.
cases = filter(lambda c: c.type == case_type, cases)
cases = [c.to_api_json(lite=True) for c in cases if c]
response = {'cases': cases}
if requires_parent_cases:
# Subtract already fetched cases from parent list
parent_ids = set(map(lambda c: c['indices']['parent']['case_id'], cases)) - \
set(map(lambda c: c['case_id'], cases))
parents = accessor.get_cases(list(parent_ids))
parents = [c.to_api_json(lite=True) for c in parents]
response.update({'parents': parents})
return json_response(response)
@cloudcare_api
def get_apps_api(request, domain):
return json_response(get_cloudcare_apps(domain))
@cloudcare_api
def get_app_api(request, domain, app_id):
try:
return json_response(look_up_app_json(domain, app_id))
except RemoteAppError:
raise Http404()
@cloudcare_api
@cache_page(60 * 30)
def get_fixtures(request, domain, user_id, fixture_id=None):
try:
user = CommCareUser.get_by_user_id(user_id)
except CouchUser.AccountTypeError:
err = ("You can't use case sharing or fixtures as a %s. "
"Login as a mobile worker and try again.") % settings.WEB_USER_TERM,
return HttpResponse(err, status=412, content_type="text/plain")
if not user:
raise Http404
assert user.is_member_of(domain)
restore_user = user.to_ota_restore_user()
if not fixture_id:
ret = ElementTree.Element("fixtures")
for fixture in generator.get_fixtures(restore_user, version=V2):
ret.append(fixture)
return HttpResponse(ElementTree.tostring(ret), content_type="text/xml")
else:
fixture = generator.get_fixture_by_id(fixture_id, restore_user, version=V2)
if not fixture:
raise Http404
assert len(fixture.getchildren()) == 1, 'fixture {} expected 1 child but found {}'.format(
fixture_id, len(fixture.getchildren())
)
return HttpResponse(ElementTree.tostring(fixture.getchildren()[0]), content_type="text/xml")
@cloudcare_api
def get_sessions(request, domain):
# is it ok to pull user from the request? other api calls seem to have an explicit 'user' param
skip = request.GET.get('skip') or 0
limit = request.GET.get('limit') or 10
return json_response(get_open_form_sessions(request.user, skip=skip, limit=limit))
@cloudcare_api
def get_session_context(request, domain, session_id):
    # NOTE: although this view does not appear to be called from anywhere, it is, and cannot be deleted.
# The javascript routing in cloudcare depends on it, though constructs it manually in a hardcoded way.
# see getSessionContextUrl in cloudcare/util.js
# Adding 'cloudcare_get_session_context' to this comment so that the url name passes a grep test
try:
session = EntrySession.objects.get(session_id=session_id)
except EntrySession.DoesNotExist:
session = None
if request.method == 'DELETE':
if session:
session.delete()
return json_response({'status': 'success'})
else:
helper = BaseSessionDataHelper(domain, request.couch_user)
return json_response(helper.get_full_context({
'session_id': session_id,
'app_id': session.app_id if session else None
}))
@cloudcare_api
def get_ledgers(request, domain):
"""
Returns ledgers associated with a case in the format:
{
"section_id": {
"product_id": amount,
"product_id": amount,
...
},
...
}
Note: this only works for the Couch backend
"""
request_params = request.GET
case_id = request_params.get('case_id')
if not case_id:
return json_response(
{'message': 'You must specify a case id to make this query.'},
status_code=400
)
try:
case = CaseAccessors(domain).get_case(case_id)
except CaseNotFound:
raise Http404()
ledger_map = LedgerAccessors(domain).get_case_ledger_state(case.case_id)
def custom_json_handler(obj):
if hasattr(obj, 'stock_on_hand'):
return obj.stock_on_hand
return json_handler(obj)
return json_response(
{
'entity_id': case_id,
'ledger': ledger_map,
},
default=custom_json_handler,
)
@cloudcare_api
def sync_db_api(request, domain):
auth_cookie = request.COOKIES.get('sessionid')
username = request.GET.get('username')
try:
response = sync_db(username, domain, DjangoAuth(auth_cookie))
except Exception, e:
return json_response(
{'status': 'error', 'message': unicode(e)},
status_code=500
)
else:
return json_response(response)
class ReadableQuestions(View):
urlname = 'readable_questions'
@csrf_exempt
@method_decorator(cloudcare_api)
def dispatch(self, request, *args, **kwargs):
return super(ReadableQuestions, self).dispatch(request, *args, **kwargs)
def post(self, request, domain):
instance_xml = request.POST.get('instanceXml').encode('utf-8')
app_id = request.POST.get('appId')
xmlns = request.POST.get('xmlns')
_, form_data_json = xml2json(instance_xml)
pretty_questions = readable.get_questions(domain, app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'form_questions': pretty_questions
})
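# Hedged request sketch for ReadableQuestions (hypothetical values): POSTing
# instanceXml=<data xmlns="http://example.org/f">...</data> together with appId
# and xmlns returns JSON with the rendered readable-form HTML ('form_data') and
# the parsed question list ('form_questions').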
@cloudcare_api
def render_form(request, domain):
# get session
session_id = request.GET.get('session_id')
session = get_object_or_404(EntrySession, session_id=session_id)
try:
raw_instance = get_raw_instance(session_id, domain)
except Exception, e:
return HttpResponse(e, status=500, content_type="text/plain")
xmlns = raw_instance["xmlns"]
form_data_xml = raw_instance["output"]
_, form_data_json = xml2json(form_data_xml)
pretty_questions = readable.get_questions(domain, session.app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'instance_xml': indent_xml(form_data_xml)
})
class HttpResponseConflict(HttpResponse):
status_code = 409
class EditCloudcareUserPermissionsView(BaseUserSettingsView):
template_name = 'cloudcare/config.html'
urlname = 'cloudcare_app_settings'
@property
def page_title(self):
if toggles.USE_FORMPLAYER_FRONTEND.enabled(self.domain):
return _("Web Apps Permissions")
else:
return _("CloudCare Permissions")
@method_decorator(domain_admin_required)
@method_decorator(requires_privilege_with_fallback(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(EditCloudcareUserPermissionsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
apps = get_cloudcare_apps(self.domain)
access = ApplicationAccess.get_template_json(self.domain, apps)
groups = Group.by_domain(self.domain)
return {
'apps': apps,
'groups': groups,
'access': access,
}
def put(self, request, *args, **kwargs):
j = json.loads(request.body)
old = ApplicationAccess.get_by_domain(self.domain)
new = ApplicationAccess.wrap(j)
old.restrict = new.restrict
old.app_groups = new.app_groups
try:
if old._rev != new._rev or old._id != new._id:
raise ResourceConflict()
old.save()
except ResourceConflict:
return HttpResponseConflict()
else:
return json_response({'_rev': old._rev})
|
qedsoftware/commcare-hq
|
corehq/apps/cloudcare/views.py
|
Python
|
bsd-3-clause
| 29,447 | 0.002241 |
# -*- encoding: utf-8 -*-
from abjad.tools.durationtools import Duration
from abjad.tools.rhythmtreetools import RhythmTreeContainer, RhythmTreeLeaf
def test_rhythmtreetools_RhythmTreeNode_duration_01():
tree = RhythmTreeContainer(preprolated_duration=1, children=[
RhythmTreeLeaf(preprolated_duration=1),
RhythmTreeContainer(preprolated_duration=2, children=[
RhythmTreeLeaf(preprolated_duration=3),
RhythmTreeLeaf(preprolated_duration=2)
]),
RhythmTreeLeaf(preprolated_duration=2)
])
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 5)
assert tree[1].duration == Duration(2, 5)
assert tree[1][0].duration == Duration(6, 25)
assert tree[1][1].duration == Duration(4, 25)
assert tree[2].duration == Duration(2, 5)
tree[1].append(tree.pop())
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 3)
assert tree[1].duration == Duration(2, 3)
assert tree[1][0].duration == Duration(2, 7)
assert tree[1][1].duration == Duration(4, 21)
assert tree[1][2].duration == Duration(4, 21)
tree.preprolated_duration = 19
assert tree.duration == Duration(19)
assert tree[0].duration == Duration(19, 3)
assert tree[1].duration == Duration(38, 3)
assert tree[1][0].duration == Duration(38, 7)
assert tree[1][1].duration == Duration(76, 21)
assert tree[1][2].duration == Duration(76, 21)
|
mscuthbert/abjad
|
abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeNode_duration.py
|
Python
|
gpl-3.0
| 1,469 | 0.000681 |
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
import factory
from factory.fuzzy import FuzzyDate, FuzzyInteger
import random
from django.contrib.auth import models as auth
from django.contrib.auth.hashers import make_password
from timepiece.contracts import models as contracts
from timepiece.crm import models as crm
from timepiece.entries import models as entries
from timepiece import utils
class User(factory.DjangoModelFactory):
FACTORY_FOR = auth.User
# FIXME: Some tests depend on first_name/last_name being unique.
first_name = factory.Sequence(lambda n: 'Sam{0}'.format(n))
last_name = factory.Sequence(lambda n: 'Blue{0}'.format(n))
username = factory.Sequence(lambda n: 'user{0}'.format(n))
email = factory.Sequence(lambda n: 'user{0}@example.com'.format(n))
password = factory.LazyAttribute(lambda n: make_password('password'))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if create and extracted:
for perm in extracted:
if isinstance(perm, basestring):
app_label, codename = perm.split('.')
perm = auth.Permission.objects.get(
content_type__app_label=app_label,
codename=codename,
)
self.user_permissions.add(perm)
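# Hedged usage sketch for the permissions hook above: User(permissions=['auth.add_user'])
# attaches Django's built-in auth.add_user permission to the created user; either
# "app_label.codename" strings or Permission objects are accepted.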
class Superuser(User):
is_superuser = True
is_staff = True
class Group(factory.DjangoModelFactory):
FACTORY_FOR = auth.Group
name = factory.Sequence(lambda n: 'group{0}'.format(n))
class ProjectContract(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ProjectContract
name = factory.Sequence(lambda n: 'contract{0}'.format(n))
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
status = contracts.ProjectContract.STATUS_CURRENT,
type = contracts.ProjectContract.PROJECT_PRE_PAID_HOURLY
@factory.post_generation
def contract_hours(self, create, extracted, **kwargs):
if create:
num_hours = extracted or random.randint(10, 400)
for i in range(2):
ContractHour(contract=self,
hours=Decimal(str(num_hours/2.0)))
@factory.post_generation
def projects(self, create, extracted, **kwargs):
if create and extracted:
self.projects.add(*extracted)
class ContractHour(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ContractHour
date_requested = datetime.date.today()
status = contracts.ContractHour.APPROVED_STATUS
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
class ContractAssignment(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ContractAssignment
user = factory.SubFactory('timepiece.tests.factories.User')
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
class HourGroup(factory.DjangoModelFactory):
FACTORY_FOR = contracts.HourGroup
name = factory.Sequence(lambda n: 'hourgroup{0}'.format(n))
class EntryGroup(factory.DjangoModelFactory):
FACTORY_FOR = contracts.EntryGroup
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
end = FuzzyDate(datetime.date.today() - relativedelta(months=1))
class TypeAttribute(factory.DjangoModelFactory):
FACTORY_FOR = crm.Attribute
label = factory.Sequence(lambda n: 'type{0}'.format(n))
type = crm.Attribute.PROJECT_TYPE
class StatusAttribute(factory.DjangoModelFactory):
FACTORY_FOR = crm.Attribute
label = factory.Sequence(lambda n: 'status{0}'.format(n))
type = crm.Attribute.PROJECT_STATUS
class Business(factory.DjangoModelFactory):
FACTORY_FOR = crm.Business
name = factory.Sequence(lambda n: 'business{0}'.format(n))
class Project(factory.DjangoModelFactory):
FACTORY_FOR = crm.Project
name = factory.Sequence(lambda n: 'project{0}'.format(n))
business = factory.SubFactory('timepiece.tests.factories.Business')
point_person = factory.SubFactory('timepiece.tests.factories.User')
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute')
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute')
class BillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=True)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=True)
class NonbillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=False)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=False)
class RelationshipType(factory.DjangoModelFactory):
FACTORY_FOR = crm.RelationshipType
name = factory.Sequence(lambda n: 'reltype{0}'.format(n))
class ProjectRelationship(factory.DjangoModelFactory):
FACTORY_FOR = crm.ProjectRelationship
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
class UserProfile(factory.DjangoModelFactory):
FACTORY_FOR = crm.UserProfile
user = factory.SubFactory('timepiece.tests.factories.User')
class Activity(factory.DjangoModelFactory):
FACTORY_FOR = entries.Activity
code = factory.Sequence(lambda n: 'a{0}'.format(n))
name = factory.Sequence(lambda n: 'activity{0}'.format(n))
class BillableActivityFactory(Activity):
billable = True
class NonbillableActivityFactory(Activity):
billable = False
class ActivityGroup(factory.DjangoModelFactory):
FACTORY_FOR = entries.ActivityGroup
name = factory.Sequence(lambda n: 'activitygroup{0}'.format(n))
class Location(factory.DjangoModelFactory):
FACTORY_FOR = entries.Location
name = factory.Sequence(lambda n: 'location{0}'.format(n))
slug = factory.Sequence(lambda n: 'location{0}'.format(n))
class Entry(factory.DjangoModelFactory):
FACTORY_FOR = entries.Entry
status = entries.Entry.UNVERIFIED
user = factory.SubFactory('timepiece.tests.factories.User')
activity = factory.SubFactory('timepiece.tests.factories.Activity')
project = factory.SubFactory('timepiece.tests.factories.Project')
location = factory.SubFactory('timepiece.tests.factories.Location')
class ProjectHours(factory.DjangoModelFactory):
FACTORY_FOR = entries.ProjectHours
week_start = utils.get_week_start()
project = factory.SubFactory('timepiece.tests.factories.Project')
user = factory.SubFactory('timepiece.tests.factories.User')
hours = FuzzyInteger(0, 20)
|
dannybrowne86/django-timepiece
|
timepiece/tests/factories.py
|
Python
|
mit
| 6,805 | 0.000882 |
"""
Base test case for the course API views.
"""
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from lms.djangoapps.courseware.tests.factories import StaffFactory
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
# pylint: disable=unused-variable
class BaseCourseViewTest(SharedModuleStoreTestCase, APITestCase):
"""
Base test class for course data views.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
view_name = None # The name of the view to use in reverse() call in self.get_url()
@classmethod
def setUpClass(cls):
super(BaseCourseViewTest, cls).setUpClass()
cls.course = CourseFactory.create(display_name='test course', run="Testing_course")
cls.course_key = cls.course.id
cls.password = 'test'
cls.student = UserFactory(username='dummy', password=cls.password)
cls.staff = StaffFactory(course_key=cls.course.id, password=cls.password)
cls.initialize_course(cls.course)
@classmethod
def initialize_course(cls, course):
"""
Sets up the structure of the test course.
"""
course.self_paced = True
cls.store.update_item(course, cls.staff.id)
cls.section = ItemFactory.create(
parent_location=course.location,
category="chapter",
)
cls.subsection1 = ItemFactory.create(
parent_location=cls.section.location,
category="sequential",
)
unit1 = ItemFactory.create(
parent_location=cls.subsection1.location,
category="vertical",
)
ItemFactory.create(
parent_location=unit1.location,
category="video",
)
ItemFactory.create(
parent_location=unit1.location,
category="problem",
)
cls.subsection2 = ItemFactory.create(
parent_location=cls.section.location,
category="sequential",
)
unit2 = ItemFactory.create(
parent_location=cls.subsection2.location,
category="vertical",
)
unit3 = ItemFactory.create(
parent_location=cls.subsection2.location,
category="vertical",
)
ItemFactory.create(
parent_location=unit3.location,
category="video",
)
ItemFactory.create(
parent_location=unit3.location,
category="video",
)
def get_url(self, course_id):
"""
Helper function to create the url
"""
return reverse(
self.view_name,
kwargs={
'course_id': course_id
}
)
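    # Hedged usage sketch (hypothetical subclass): a concrete test case sets
    # view_name, logs in with self.client.login(username=self.staff.username,
    # password=self.password), and then requests self.get_url(self.course_key).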
|
cpennington/edx-platform
|
cms/djangoapps/contentstore/api/tests/base.py
|
Python
|
agpl-3.0
| 2,906 | 0.001376 |
#!/usr/bin/python
#
# \file 2_build.py
# \brief Build rbank
# \date 2009-03-10-22-43-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Build rbank
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Build rbank")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Find tools
BuildIgBoxes = findTool(log, ToolDirectories, BuildIgBoxesTool, ToolSuffix)
ExecTimeout = findTool(log, ToolDirectories, ExecTimeoutTool, ToolSuffix)
BuildRbank = findTool(log, ToolDirectories, BuildRbankTool, ToolSuffix)
GetNeighbors = findTool(log, ToolDirectories, GetNeighborsTool, ToolSuffix)
BuildIndoorRbank = findTool(log, ToolDirectories, BuildIndoorRbankTool, ToolSuffix)
# AiBuildWmap = findTool(log, ToolDirectories, AiBuildWmapTool, ToolSuffix)
printLog(log, "")
# Build rbank bbox
printLog(log, ">>> Build rbank bbox <<<")
if BuildIgBoxes == "":
toolLogFail(log, BuildIgBoxesTool, ToolSuffix)
else:
mkPath(log, ExportBuildDirectory + "/" + RbankBboxBuildDirectory)
cf = open("build_ig_boxes.cfg", "w")
cf.write("\n")
cf.write("Pathes = {\n")
for dir in IgLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
for dir in ShapeLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("IGs = {\n")
for dir in IgLookupDirectories:
files = findFiles(log, ExportBuildDirectory + "/" + dir, "", ".ig")
for file in files:
cf.write("\t\"" + os.path.basename(file)[0:-len(".ig")] + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("Output = \"" + ExportBuildDirectory + "/" + RbankBboxBuildDirectory + "/temp.bbox\";\n")
cf.write("\n")
cf.close()
subprocess.call([ BuildIgBoxes ])
os.remove("build_ig_boxes.cfg")
printLog(log, "")
printLog(log, ">>> Build rbank build config <<<")
cf = open("build_rbank.cfg", "w")
cf.write("\n")
cf.write("// Rbank settings\n")
cf.write("\n")
cf.write("Verbose = " + str(RBankVerbose) + ";\n")
cf.write("CheckConsistency = " + str(RBankConsistencyCheck) + ";\n")
mkPath(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory)
cf.write("ZonePath = \"" + ExportBuildDirectory + "/" + ZoneWeldBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + SmallbankExportDirectory)
cf.write("BanksPath = \"" + ExportBuildDirectory + "/" + SmallbankExportDirectory + "/\";\n")
cf.write("Bank = \"" + ExportBuildDirectory + "/" + SmallbankExportDirectory + "/" + BankTileBankName + ".smallbank\";\n")
cf.write("ZoneExt = \".zonew\";\n")
cf.write("ZoneNHExt = \".zonenhw\";\n")
cf.write("IGBoxes = \"" + ExportBuildDirectory + "/" + RbankBboxBuildDirectory + "/temp.bbox\";\n")
mkPath(log, LeveldesignWorldDirectory)
cf.write("LevelDesignWorldPath = \"" + LeveldesignWorldDirectory + "\";\n")
mkPath(log, ExportBuildDirectory + "/" + IgLandBuildDirectory)
cf.write("IgLandPath = \"" + ExportBuildDirectory + "/" + IgLandBuildDirectory + "\";\n")
mkPath(log, ExportBuildDirectory + "/" + IgOtherBuildDirectory)
cf.write("IgVillagePath = \"" + ExportBuildDirectory + "/" + IgOtherBuildDirectory + "\";\n")
cf.write("\n")
mkPath(log, ExportBuildDirectory + "/" + RbankTessellationBuildDirectory)
cf.write("TessellationPath = \"" + ExportBuildDirectory + "/" + RbankTessellationBuildDirectory + "/\";\n")
cf.write("TessellateLevel = " + str(BuildQuality) + ";\n") # BuildQuality
cf.write("\n")
cf.write("WaterThreshold = 1.0;\n")
cf.write("\n")
cf.write("OutputRootPath = \"" + ExportBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + RbankSmoothBuildDirectory)
cf.write("SmoothDirectory = \"" + RbankSmoothBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + RbankRawBuildDirectory)
cf.write("RawDirectory = \"" + RbankRawBuildDirectory + "/\";\n")
cf.write("\n")
cf.write("ReduceSurfaces = " + str(RbankReduceSurfaces) + ";\n")
cf.write("SmoothBorders = " + str(RbankSmoothBorders) + ";\n")
cf.write("\n")
cf.write("ComputeElevation = " + str(RbankComputeElevation) + ";\n")
cf.write("ComputeLevels = " + str(RbankComputeLevels) + ";\n")
cf.write("\n")
cf.write("LinkElements = " + str(RbankLinkElements) + ";\n")
cf.write("\n")
cf.write("CutEdges = " + str(RbankCutEdges) + ";\n")
cf.write("\n")
cf.write("UseZoneSquare = " + str(RbankUseZoneSquare) + ";\n")
cf.write("\n")
cf.write("// The whole landscape\n")
cf.write("ZoneUL = \"" + RbankZoneUl + "\";\n")
cf.write("ZoneDR = \"" + RbankZoneDr + "\";\n")
cf.write("\n")
mkPath(log, ExportBuildDirectory + "/" + RbankPreprocBuildDirectory)
cf.write("PreprocessDirectory = \"" + ExportBuildDirectory + "/" + RbankPreprocBuildDirectory + "/\";\n")
cf.write("\n")
cf.write("// The global retriever processing settings\n")
cf.write("GlobalRetriever = \"temp.gr\";\n")
cf.write("RetrieverBank = \"temp.rbank\";\n")
cf.write("\n")
cf.write("GlobalUL = \"" + RbankZoneUl + "\";\n")
cf.write("GlobalDR = \"" + RbankZoneDr + "\";\n")
cf.write("\n")
cf.write("// Which kind of stuff to do\n")
cf.write("TessellateZones = 0;\n")
cf.write("MoulineZones = 0;\n")
cf.write("ProcessRetrievers = 0;\n")
cf.write("ProcessGlobal = 0;\n")
cf.write("\n")
cf.write("Zones = {\n")
mkPath(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory)
files = findFiles(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory, "", ".zonew")
for file in files:
cf.write("\t\"" + os.path.basename(file) + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("Pathes = {\n")
for dir in IgLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
for dir in ShapeLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.close()
printLog(log, "")
printLog(log, ">>> Build rbank check prims <<<")
if BuildRbank == "":
toolLogFail(log, BuildRbankTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
subprocess.call([ ExecTimeout, str(RbankBuildTesselTimeout), BuildRbank, "-C", "-p", "-g" ])
printLog(log, "")
printLog(log, ">>> Build rbank process all passes <<<")
if BuildRbank == "":
toolLogFail(log, BuildRbankTool, ToolSuffix)
if GetNeighbors == "":
toolLogFail(log, GetNeighborsTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
zonefiles = findFiles(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory, "", ".zonew")
for zonefile in zonefiles:
zone = os.path.basename(zonefile)[0:-len(".zonew")]
lr1 = ExportBuildDirectory + "/" + RbankSmoothBuildDirectory + "/" + zone + ".lr"
nearzones = subprocess.Popen([ GetNeighbors, zone ], stdout = subprocess.PIPE).communicate()[0].strip().split(" ")
printLog(log, "ZONE " + zone + ": " + str(nearzones))
zone_to_build = 0
for nearzone in nearzones:
sourcePath = ExportBuildDirectory + "/" + ZoneWeldBuildDirectory + "/" + nearzone + ".zonew"
if (os.path.isfile(sourcePath)):
if (needUpdate(log, sourcePath, lr1)):
zone_to_build = 1
sourcePath = ExportBuildDirectory + "/" + ZoneWeldBuildDirectory + "/" + zone + ".zonew"
if zone_to_build:
printLog(log, sourcePath + " -> " + lr1)
subprocess.call([ ExecTimeout, str(RbankBuildTesselTimeout), BuildRbank, "-c", "-P", "-g", os.path.basename(zonefile) ])
else:
printLog(log, "SKIP " + lr1)
printLog(log, "")
printLog(log, ">>> Build rbank process global <<<") # TODO: Check if the LR changed?
if BuildRbank == "":
toolLogFail(log, BuildRbankTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
subprocess.call([ ExecTimeout, str(RbankBuildProcglobalTimeout), BuildRbank, "-c", "-P", "-G" ])
printLog(log, "")
os.remove("build_rbank.cfg")
printLog(log, ">>> Build rbank indoor <<<")
if BuildIndoorRbank == "":
toolLogFail(log, BuildIndoorRbankTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
retrieversDir = ExportBuildDirectory + "/" + RbankRetrieversBuildDirectory
mkPath(log, retrieversDir)
removeFilesRecursiveExt(log, retrieversDir, ".rbank")
removeFilesRecursiveExt(log, retrieversDir, ".gr")
removeFilesRecursiveExt(log, retrieversDir, ".lr")
cf = open("build_indoor_rbank.cfg", "w")
cf.write("\n")
mkPath(log, ExportBuildDirectory + "/" + RBankCmbExportDirectory)
cf.write("MeshPath = \"" + ExportBuildDirectory + "/" + RBankCmbExportDirectory + "/\";\n")
# cf.write("Meshes = { };\n")
cf.write("Meshes = \n")
cf.write("{\n")
meshFiles = findFilesNoSubdir(log, ExportBuildDirectory + "/" + RBankCmbExportDirectory, ".cmb")
lenCmbExt = len(".cmb")
for file in meshFiles:
cf.write("\t\"" + file[0:-lenCmbExt] + "\", \n")
cf.write("};\n")
cf.write("OutputPath = \"" + retrieversDir + "/\";\n")
# mkPath(log, ExportBuildDirectory + "/" + RbankOutputBuildDirectory)
# cf.write("OutputPath = \"" + ExportBuildDirectory + "/" + RbankOutputBuildDirectory + "/\";\n")
cf.write("OutputPrefix = \"unused\";\n")
cf.write("Merge = 1;\n")
mkPath(log, ExportBuildDirectory + "/" + RbankSmoothBuildDirectory)
cf.write("MergePath = \"" + ExportBuildDirectory + "/" + RbankSmoothBuildDirectory + "/\";\n")
cf.write("MergeInputPrefix = \"temp\";\n")
cf.write("MergeOutputPrefix = \"tempMerged\";\n")
# cf.write("MergeOutputPrefix = \"" + RbankRbankName + "\";\n")
cf.write("AddToRetriever = 1;\n")
cf.write("\n")
cf.close()
subprocess.call([ ExecTimeout, str(RbankBuildIndoorTimeout), BuildIndoorRbank ])
os.remove("build_indoor_rbank.cfg")
printLog(log, "")
retrieversDir = ExportBuildDirectory + "/" + RbankRetrieversBuildDirectory
mkPath(log, retrieversDir)
outputDir = ExportBuildDirectory + "/" + RbankOutputBuildDirectory
mkPath(log, outputDir)
printLog(log, ">>> Move gr, rbank and lr <<<")
if needUpdateDirNoSubdir(log, retrieversDir, outputDir):
removeFilesRecursiveExt(log, outputDir, ".rbank")
removeFilesRecursiveExt(log, outputDir, ".gr")
removeFilesRecursiveExt(log, outputDir, ".lr")
copyFilesRenamePrefixExt(log, retrieversDir, outputDir, "tempMerged", RbankRbankName, ".rbank")
copyFilesRenamePrefixExt(log, retrieversDir, outputDir, "tempMerged", RbankRbankName, ".gr")
copyFilesRenamePrefixExt(log, retrieversDir, outputDir, "tempMerged_", RbankRbankName + "_", ".lr")
else:
printLog(log, "SKIP *")
log.close()
# end of file
|
osgcc/ryzom
|
nel/tools/build_gamedata/processes/rbank/2_build.py
|
Python
|
agpl-3.0
| 11,551 | 0.013159 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1RoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1alpha1Role]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1alpha1RoleList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1alpha1RoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1alpha1RoleList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1alpha1RoleList.
Items is a list of Roles
:return: The items of this V1alpha1RoleList.
:rtype: list[V1alpha1Role]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1alpha1RoleList.
Items is a list of Roles
:param items: The items of this V1alpha1RoleList.
:type: list[V1alpha1Role]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1alpha1RoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1alpha1RoleList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:return: The metadata of this V1alpha1RoleList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:param metadata: The metadata of this V1alpha1RoleList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1RoleList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
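# Hedged usage sketch (hypothetical values; assumes the companion generated models
# V1alpha1Role and V1ListMeta from the same client package):
#   role_list = V1alpha1RoleList(api_version="rbac.authorization.k8s.io/v1alpha1",
#                                kind="RoleList", items=[])
#   role_list.to_dict()  # {'api_version': ..., 'items': [], 'kind': ..., 'metadata': None}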
|
mbohlool/client-python
|
kubernetes/client/models/v1alpha1_role_list.py
|
Python
|
apache-2.0
| 6,307 | 0.001586 |
# -*- coding: utf-8 -*-
"""
priority: HTTP/2 priority implementation for Python
"""
from .priority import ( # noqa
Stream,
PriorityTree,
DeadlockError,
PriorityLoop,
PriorityError,
DuplicateStreamError,
MissingStreamError,
TooManyStreamsError,
BadWeightError,
PseudoStreamError,
)
__version__ = "2.0.0"
|
python-hyper/priority
|
src/priority/__init__.py
|
Python
|
mit
| 346 | 0 |
# Copyright (c) 2014-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhccons_stdout
class NetMHCcons(BaseCommandlinePredictor):
def __init__(
self,
alleles,
program_name="netMHCcons",
process_limit=0,
default_peptide_lengths=[9]):
BaseCommandlinePredictor.__init__(
self,
program_name=program_name,
alleles=alleles,
parse_output_fn=parse_netmhccons_stdout,
# netMHCcons does not have a supported allele flag
supported_alleles_flag=None,
length_flag="-length",
input_file_flag="-f",
allele_flag="-a",
peptide_mode_flags=["-inptype", "1"],
tempdir_flag="-tdir",
process_limit=process_limit,
default_peptide_lengths=default_peptide_lengths,
group_peptides_by_length=True)
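# Illustrative sketch (not part of the original module): constructing the
# predictor for one allele. Any downstream prediction call (and a working
# netMHCcons install) is an assumption about the BaseCommandlinePredictor
# interface, so it is left as a comment.
if __name__ == "__main__":
    predictor = NetMHCcons(alleles=["HLA-A*02:01"], default_peptide_lengths=[9])
    # hypothetical: predictor.predict_subsequences({"protein1": "SIINFEKLILKEPVHGV"})
    print(predictor)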
|
hammerlab/mhctools
|
mhctools/netmhc_cons.py
|
Python
|
apache-2.0
| 1,609 | 0.000622 |
uname = ParseFunction('uname -a > {OUT}')
for group in ('disc', 'ccl', 'gh'):
batch_options = 'requirements = MachineGroup == "{0}"'.format(group)
uname(outputs='uname.{0}'.format(group), environment={'BATCH_OPTIONS': batch_options})
#for group in ('disc', 'ccl', 'gh'):
# with Options(batch='requirements = MachineGroup == "{0}"'.format(group)):
# uname(outputs='uname.{0}'.format(group))
|
isanwong/cctools
|
weaver/src/examples/batch.py
|
Python
|
gpl-2.0
| 410 | 0.004878 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pipeline.backend.pipeline import PipeLine
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
data_base = config.data_base_dir
# partition for data storage
partition = 4
# table name and namespace, used in FATE job configuration
dense_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
tag_data = {"name": "tag_value_1", "namespace": f"experiment{namespace}"}
pipeline_upload = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)
# add upload data info
# path to csv file(s) to be uploaded
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/breast_hetero_guest.csv"),
table_name=dense_data["name"], # table name
namespace=dense_data["namespace"], # namespace
head=1, partition=partition, # data info
id_delimiter=",",
extend_sid=True)
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/tag_value_1000_140.csv"),
table_name=tag_data["name"],
namespace=tag_data["namespace"],
head=0, partition=partition,
id_delimiter=",",
extend_sid=True)
# upload both data
pipeline_upload.upload(drop=1)
if __name__ == "__main__":
main()
|
FederatedAI/FATE
|
examples/pipeline/upload/pipeline-upload-extend-sid.py
|
Python
|
apache-2.0
| 2,416 | 0.003311 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="surface.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/surface/hoverlabel/font/_color.py
|
Python
|
mit
| 470 | 0.002128 |
import os, sys, warnings, operator
import numbers
import itertools
import string, fnmatch
import unicodedata
from collections import defaultdict
import numpy as np
import param
try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
try:
import pandas as pd # noqa (optional import)
except ImportError:
pd = None
try:
import dask.dataframe as dd
except ImportError:
dd = None
# Python3 compatibility
import types
if sys.version_info.major == 3:
basestring = str
unicode = str
generator_types = (zip, range, types.GeneratorType)
else:
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if isinstance(key, np.ndarray) and key.dtype.kind == 'b':
return key
wrapped_key = wrap_tuple(key)
if wrapped_key.count(Ellipsis)== 0:
return key
if wrapped_key.count(Ellipsis)!=1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
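# Illustrative sketch (not in the original module): a minimal stand-in object
# with two key dimensions and one value dimension, showing how an Ellipsis in
# a key is padded out with empty slices. Never called at import time.
def _example_process_ellipses():
    class _Dummy(object):
        kdims = ['x', 'y']
        vdims = ['z']
        def dimensions(self):
            return self.kdims + self.vdims
    padded = process_ellipses(_Dummy(), (0, Ellipsis))
    assert padded == (0, slice(None), slice(None))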
def safe_unicode(value):
if sys.version_info.major == 3 or not isinstance(value, str): return value
else: return unicode(value.decode('utf-8'))
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class Aliases(object):
"""
Helper class useful for defining a set of alias tuples on a single object.
For instance, when defining a group or label with an alias, instead
of setting tuples in the constructor, you could use
``aliases.water`` if you first define:
>>> aliases = Aliases(water='H_2O', glucose='C_6H_{12}O_6')
>>> aliases.water
('water', 'H_2O')
This may be used to conveniently define aliases for groups, labels
or dimension names.
"""
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, (k,v))
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
    appropriate for Python 2 (no unicode identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
    allows filtering, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
        The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
        order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
        unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += safe_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
        # Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = safe_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % self.disallowed)
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
def find_minmax(lims, olims):
"""
Takes (a1, a2) and (b1, b2) as input and returns
(np.nanmin(a1, b1), np.nanmax(a2, b2)). Used to calculate
min and max values of a number of items.
"""
try:
limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax])
limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip])
except:
limits = (np.NaN, np.NaN)
return limits
def find_range(values, soft_range=[]):
"""
Safely finds either the numerical min and max of
a set of values, falling back to the first and
the last value in the sorted list of values.
"""
try:
values = np.array(values)
values = np.squeeze(values) if len(values.shape) > 1 else values
if len(soft_range):
values = np.concatenate([values, soft_range])
if values.dtype.kind == 'M':
return values.min(), values.max()
return np.nanmin(values), np.nanmax(values)
except:
try:
values = sorted(values)
return (values[0], values[-1])
except:
return (None, None)
def max_range(ranges):
"""
    Computes the maximal lower and upper bounds from a list of bounds.
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [r for r in ranges for v in r if v is not None]
if pd and all(isinstance(v, pd.tslib.Timestamp) for r in values for v in r):
values = [(v1.to_datetime64(), v2.to_datetime64()) for v1, v2 in values]
arr = np.array(values)
if arr.dtype.kind in 'M':
return arr[:, 0].min(), arr[:, 1].max()
return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
except:
return (np.NaN, np.NaN)
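# Illustrative sketch (not in the original module): find_range returns the
# numeric extent of a sequence, and find_minmax merges two (low, high) pairs
# into the widest combined bounds; max_range does the same for a whole list.
def _example_ranges():
    assert find_range([3, 1, 2]) == (1, 3)
    assert find_minmax((0, 3), (1, 5)) == (0.0, 5.0)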
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
    all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None]
upper = [v for v in arr[uidx] if v is not None]
if lower and isinstance(lower[0], np.datetime64):
extents[lidx] = np.min(lower)
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], np.datetime64):
extents[uidx] = np.max(upper)
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents)
def int_to_alpha(n, upper=True):
"Generates alphanumeric labels of form A-Z, AA-ZZ etc."
casenum = 65 if upper else 97
label = ''
count= 0
if n == 0: return str(chr(n + casenum))
while n >= 0:
mod, div = n % 26, n
for _ in range(count):
div //= 26
div %= 26
if count == 0:
val = mod
else:
val = div
label += str(chr(val + casenum))
count += 1
n -= 26**count
return label[::-1]
def int_to_roman(input):
if type(input) != type(1):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
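# Illustrative sketch (not in the original module): expected outputs of the
# two label helpers above.
def _example_int_labels():
    assert int_to_alpha(0) == 'A'
    assert int_to_alpha(25) == 'Z'
    assert int_to_alpha(26) == 'AA'
    assert int_to_roman(2016) == 'MMXVI'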
def unique_iterator(seq):
"""
Returns an iterator containing all non-duplicate elements
in the input sequence.
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
def unique_array(arr):
"""
Returns an array of unique values in the input order
"""
if pd:
return pd.unique(arr)
else:
_, uniq_inds = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)]
def match_spec(element, specification):
"""
Matches the group.label specification of the supplied
element against the supplied specification dictionary
returning the value of the best match.
"""
match_tuple = ()
match = specification.get((), {})
for spec in [type(element).__name__,
group_sanitizer(element.group, escape=False),
label_sanitizer(element.label, escape=False)]:
match_tuple += (spec,)
if match_tuple in specification:
match = specification[match_tuple]
return match
def python2sort(x,key=None):
if len(x) == 0: return x
it = iter(x)
groups = [[next(it)]]
for item in it:
for group in groups:
try:
item_precedence = item if key is None else key(item)
group_precedence = group[0] if key is None else key(group[0])
item_precedence < group_precedence # exception if not comparable
group.append(item)
break
except TypeError:
continue
else: # did not break, make new group
groups.append([item])
return itertools.chain.from_iterable(sorted(group, key=key) for group in groups)
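# Illustrative sketch (not in the original module): under Python 3, mixed
# types that cannot be compared are split into separate groups, each sorted
# on its own, mimicking Python 2's tolerant sort order.
def _example_python2sort():
    assert list(python2sort([3, 1, 'b', 'a'])) == [1, 3, 'a', 'b']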
def dimension_sort(odict, kdims, vdims, categorical, key_index, cached_values):
"""
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
"""
sortkws = {}
ndims = len(kdims)
dimensions = kdims+vdims
indexes = [(dimensions[i], int(i not in range(ndims)),
i if i in range(ndims) else i-ndims)
for i in key_index]
if len(set(key_index)) != len(key_index):
raise ValueError("Cannot sort on duplicated dimensions")
elif categorical:
sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])
if dim.values else x[t][d]
for i, (dim, t, d) in enumerate(indexes))
elif key_index != list(range(len(kdims+vdims))):
sortkws['key'] = lambda x: tuple(x[t][d] for _, t, d in indexes)
if sys.version_info.major == 3:
return python2sort(odict.items(), **sortkws)
else:
return sorted(odict.items(), **sortkws)
# Copied from param should make param version public
def is_number(obj):
if isinstance(obj, numbers.Number): return True
# The extra check is for classes that behave like numbers, such as those
# found in numpy, gmpy, etc.
elif (hasattr(obj, '__int__') and hasattr(obj, '__add__')): return True
# This is for older versions of gmpy
elif hasattr(obj, 'qdiv'): return True
else: return False
class ProgressIndicator(param.Parameterized):
"""
Baseclass for any ProgressIndicator that indicates progress
as a completion percentage.
"""
percent_range = param.NumericTuple(default=(0.0, 100.0), doc="""
The total percentage spanned by the progress bar when called
with a value between 0% and 100%. This allows an overall
completion in percent to be broken down into smaller sub-tasks
that individually complete to 100 percent.""")
label = param.String(default='Progress', allow_None=True, doc="""
The label of the current progress bar.""")
def __call__(self, completion):
raise NotImplementedError
def sort_topologically(graph):
"""
Stackless topological sorting.
graph = {
3: [1],
5: [3],
4: [2],
6: [4],
}
sort_topologically(graph)
[set([1, 2]), set([3, 4]), set([5, 6])]
"""
levels_by_name = {}
names_by_level = defaultdict(set)
def add_level_to_name(name, level):
levels_by_name[name] = level
names_by_level[level].add(name)
def walk_depth_first(name):
stack = [name]
while(stack):
name = stack.pop()
if name in levels_by_name:
continue
if name not in graph or not graph[name]:
level = 0
add_level_to_name(name, level)
continue
children = graph[name]
children_not_calculated = [child for child in children if child not in levels_by_name]
if children_not_calculated:
stack.append(name)
stack.extend(children_not_calculated)
continue
level = 1 + max(levels_by_name[lname] for lname in children)
add_level_to_name(name, level)
for name in graph:
walk_depth_first(name)
return list(itertools.takewhile(lambda x: x is not None,
(names_by_level.get(i, None)
for i in itertools.count())))
def get_overlay_spec(o, k, v):
"""
Gets the type.group.label + key spec from an Element in an Overlay.
"""
k = wrap_tuple(k)
return ((type(v).__name__, v.group, v.label) + k if len(o.kdims) else
(type(v).__name__,) + k)
def layer_sort(hmap):
"""
Find a global ordering for layers in a HoloMap of CompositeOverlay
types.
"""
orderings = {}
for o in hmap:
okeys = [get_overlay_spec(o, k, v) for k, v in o.data.items()]
if len(okeys) == 1 and not okeys[0] in orderings:
orderings[okeys[0]] = []
else:
orderings.update({k: [] if k == v else [v] for k, v in zip(okeys[1:], okeys)})
return [i for g in sort_topologically(orderings) for i in sorted(g)]
def layer_groups(ordering, length=2):
"""
Splits a global ordering of Layers into groups based on a slice of
the spec. The grouping behavior can be modified by changing the
length of spec the entries are grouped by.
"""
group_orderings = defaultdict(list)
for el in ordering:
group_orderings[el[:length]].append(el)
return group_orderings
def group_select(selects, length=None, depth=None):
"""
Given a list of key tuples to select, groups them into sensible
chunks to avoid duplicating indexing operations.
"""
if length == None and depth == None:
length = depth = len(selects[0])
getter = operator.itemgetter(depth-length)
if length > 1:
selects = sorted(selects, key=getter)
grouped_selects = defaultdict(dict)
for k, v in itertools.groupby(selects, getter):
grouped_selects[k] = group_select(list(v), length-1, depth)
return grouped_selects
else:
return list(selects)
def iterative_select(obj, dimensions, selects, depth=None):
"""
Takes the output of group_select selecting subgroups iteratively,
avoiding duplicating select operations.
"""
ndims = len(dimensions)
depth = depth if depth is not None else ndims
items = []
if isinstance(selects, dict):
for k, v in selects.items():
items += iterative_select(obj.select(**{dimensions[ndims-depth]: k}),
dimensions, v, depth-1)
else:
for s in selects:
items.append((s, obj.select(**{dimensions[-1]: s[-1]})))
return items
def get_spec(obj):
"""
Gets the spec from any labeled data object.
"""
return (obj.__class__.__name__,
obj.group, obj.label)
def find_file(folder, filename):
"""
Find a file given folder and filename.
"""
matches = []
for root, _, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, filename))
return matches[-1]
def is_dataframe(data):
"""
    Checks whether the supplied data is a DataFrame type.
"""
return((pd is not None and isinstance(data, pd.DataFrame)) or
(dd is not None and isinstance(data, dd.DataFrame)))
def get_param_values(data):
params = dict(kdims=data.kdims, vdims=data.vdims,
label=data.label)
if data.group != data.params()['group'].default:
params['group'] = data.group
return params
def get_ndmapping_label(ndmapping, attr):
"""
Function to get the first non-auxiliary object
label attribute from an NdMapping.
"""
label = None
els = itervalues(ndmapping.data)
while label is None:
try:
el = next(els)
except StopIteration:
return None
if not el._auxiliary_component:
label = getattr(el, attr)
if attr == 'group':
tp = type(el).__name__
if tp == label:
return None
return label
def wrap_tuple(unwrapped):
""" Wraps any non-tuple types in a tuple """
return (unwrapped if isinstance(unwrapped, tuple) else (unwrapped,))
def itervalues(obj):
"Get value iterator from dictionary for Python 2 and 3"
return iter(obj.values()) if sys.version_info.major == 3 else obj.itervalues()
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
def get_unique_keys(ndmapping, dimensions):
inds = [ndmapping.get_dimension_index(dim) for dim in dimensions]
getter = operator.itemgetter(*inds)
return unique_iterator(getter(key) if len(inds) > 1 else (key[inds[0]],)
for key in ndmapping.data.keys())
def unpack_group(group, getter):
for k, v in group.iterrows():
obj = v.values[0]
key = getter(k)
if hasattr(obj, 'kdims'):
yield (key, obj)
else:
obj = tuple(v)
yield (wrap_tuple(key), obj)
class ndmapping_groupby(param.ParameterizedFunction):
"""
Apply a groupby operation to an NdMapping, using pandas to improve
performance (if available).
"""
def __call__(self, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
try:
import pandas # noqa (optional import)
groupby = self.groupby_pandas
except:
groupby = self.groupby_python
return groupby(ndmapping, dimensions, container_type,
group_type, sort=sort, **kwargs)
@param.parameterized.bothmethod
def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
if 'kdims' in kwargs:
idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']]
else:
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
all_dims = [d.name for d in ndmapping.kdims]
inds = [ndmapping.get_dimension_index(dim) for dim in idims]
getter = operator.itemgetter(*inds) if inds else lambda x: tuple()
multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(), names=all_dims)
df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())), index=multi_index)
kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), **kwargs)
groups = ((wrap_tuple(k), group_type(OrderedDict(unpack_group(group, getter)), **kwargs))
for k, group in df.groupby(level=[d.name for d in dimensions]))
if sort:
selects = list(get_unique_keys(ndmapping, dimensions))
groups = sorted(groups, key=lambda x: selects.index(x[0]))
return container_type(groups, kdims=dimensions)
@param.parameterized.bothmethod
def groupby_python(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
dim_names = [dim.name for dim in dimensions]
selects = get_unique_keys(ndmapping, dimensions)
selects = group_select(list(selects))
groups = [(k, group_type((v.reindex(idims) if hasattr(v, 'kdims')
else [((), (v,))]), **kwargs))
for k, v in iterative_select(ndmapping, dim_names, selects)]
return container_type(groups, kdims=dimensions)
def cartesian_product(arrays):
"""
Computes the cartesian product of a list of 1D arrays
returning arrays matching the shape defined by all
supplied dimensions.
"""
return np.broadcast_arrays(*np.ix_(*arrays))
def arglexsort(arrays):
"""
Returns the indices of the lexicographical sorting
order of the supplied arrays.
"""
dtypes = ','.join(array.dtype.str for array in arrays)
recarray = np.empty(len(arrays[0]), dtype=dtypes)
for i, array in enumerate(arrays):
recarray['f%s' % i] = array
return recarray.argsort()
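# Illustrative sketch (not in the original module): arglexsort orders by the
# first array and breaks ties with the second; cartesian_product broadcasts
# 1D arrays into a full grid.
def _example_sort_and_product():
    primary = np.array([1, 1, 0])
    secondary = np.array([2, 1, 3])
    assert list(arglexsort([primary, secondary])) == [2, 1, 0]
    xs, ys = cartesian_product([np.array([1, 2]), np.array([10, 20, 30])])
    assert xs.shape == ys.shape == (2, 3)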
def get_dynamic_item(map_obj, dimensions, key):
"""
Looks up an item in a DynamicMap given a list of dimensions
and a corresponding key. The dimensions must be a subset
of the map_obj key dimensions.
"""
if isinstance(key, tuple):
dims = {d.name: k for d, k in zip(dimensions, key)
if d in map_obj.kdims}
key = tuple(dims.get(d.name) for d in map_obj.kdims)
el = map_obj.select([lambda x: type(x).__name__ == 'DynamicMap'],
**dims)
elif key < map_obj.counter:
key_offset = max([key-map_obj.cache_size, 0])
key = map_obj.keys()[min([key-key_offset,
len(map_obj)-1])]
el = map_obj[key]
elif key >= map_obj.counter:
el = next(map_obj)
key = list(map_obj.keys())[-1]
else:
el = None
return key, el
|
vascotenner/holoviews
|
holoviews/core/util.py
|
Python
|
bsd-3-clause
| 31,475 | 0.004829 |
# -*- coding: utf-8 -*-
#
# UnitX documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 17 05:31:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'UnitX'
copyright = u'2016, Tasuku TAKAHASHI'
author = u'Tasuku TAKAHASHI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.7.0'
# The full version, including alpha/beta/rc tags.
release = u'0.7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
#html_theme = 'default' #Good!
#html_theme = 'sphinx_rtd_theme' #Good!
#html_theme = 'agogo' #Good!
#html_theme = 'nature' #Pretty Good!
# Options for HTML output
# -----------------------
# Use our custom theme.
html_theme = 'pydoctheme'
html_theme_path = ['tools']
html_theme_options = {'collapsiblesidebar': True}
# Short title used e.g. for <title> HTML tags.
html_short_title = '%s Documentation' % release
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Path to find HTML templates.
templates_path = ['tools/templates']
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://supertask.github.io/unitx/'
# Additional static files.
html_static_path = ['tools/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'UnitX' + release.replace('.', '')
# Split the index
html_split_index = True
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'UnitX.tex', u'UnitX Documentation',
u'Tasuku TAKAHASHI', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'unitx', u'UnitX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'UnitX', u'UnitX Documentation',
author, 'UnitX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
0ED/UnitX
|
doc/conf.py
|
Python
|
mit
| 6,844 | 0.005552 |
# -*- coding: utf-8 -*-
from allauth.socialaccount.tests import create_oauth_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import FlickrProvider
class FlickrTests(create_oauth_tests(registry.by_id(FlickrProvider.id))):
def get_mocked_response(self):
#
return [
MockedResponse(200, r"""
{"stat": "ok", "user": {"username": {"_content": "pennersr"}, "id": "12345678@N00"}}
"""), # noqa
MockedResponse(200, r"""
{"person": {"username": {"_content": "pennersr"}, "photosurl": {"_content": "http://www.flickr.com/photos/12345678@N00/"}, "nsid": "12345678@N00", "path_alias": null, "photos": {"count": {"_content": 0}, "firstdatetaken": {"_content": null}, "views": {"_content": "28"}, "firstdate": {"_content": null}}, "iconserver": "0", "description": {"_content": ""}, "mobileurl": {"_content": "http://m.flickr.com/photostream.gne?id=6294613"}, "profileurl": {"_content": "http://www.flickr.com/people/12345678@N00/"}, "mbox_sha1sum": {"_content": "5e5b359c123e54f95236209c8808d607a5cdd21e"}, "ispro": 0, "location": {"_content": ""}, "id": "12345678@N00", "realname": {"_content": "raymond penners"}, "iconfarm": 0}, "stat": "ok"}
""")] # noqa
def test_login(self):
account = super(FlickrTests, self).test_login()
f_account = account.get_provider_account()
self.assertEqual(account.user.first_name,
'raymond')
self.assertEqual(account.user.last_name,
'penners')
self.assertEqual(f_account.get_profile_url(),
'http://www.flickr.com/people/12345678@N00/')
|
tejesh95/Zubio.in
|
zubio/allauth/socialaccount/providers/flickr/tests.py
|
Python
|
mit
| 1,692 | 0 |
"""
kombu.serialization
===================
Serialization utilities.
:copyright: (c) 2009 - 2011 by Ask Solem
:license: BSD, see LICENSE for more details.
"""
import codecs
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError:
cpickle = None # noqa
from kombu.utils.encoding import str_to_bytes
if sys.platform.startswith("java"):
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
if sys.version_info < (2, 6): # pragma: no cover
# cPickle is broken in Python <= 2.5.
# It unsafely and incorrectly uses relative instead of absolute
# imports,
# so e.g.:
# exceptions.KeyError
# becomes:
# kombu.exceptions.KeyError
#
# Your best choice is to upgrade to Python 2.6,
# as while the pure pickle version has worse performance,
# it is the only safe option for older Python versions.
pickle = pypickle
else:
pickle = cpickle or pypickle
bytes_type = str
if sys.version_info >= (3, 0):
bytes_type = bytes
class SerializerNotInstalled(StandardError):
"""Support for the requested serialization type is not installed"""
pass
class SerializerRegistry(object):
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
if encoder:
self._encoders[name] = (content_type, content_encoding, encoder)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
def disable(self, name):
if '/' not in name:
name = self.type_to_name[name]
self._disabled_content_types.add(name)
def unregister(self, name):
try:
content_type = self._encoders[name][0]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
except KeyError:
raise SerializerNotInstalled(
"No encoder/decoder installed for %s" % name)
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
"No encoder installed for %s" % name)
def encode(self, data, serializer=None):
if serializer == "raw":
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
"No encoder installed for %s" % serializer)
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
        # set of 'binary' will encompass both, even if not ideal.)
if not serializer and isinstance(data, bytes_type):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return "application/data", "binary", data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, unicode):
payload = data.encode("utf-8")
return "text/plain", "utf-8", payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
payload = encoder(data)
return content_type, content_encoding, payload
def decode(self, data, content_type, content_encoding, force=False):
if content_type in self._disabled_content_types:
raise SerializerNotInstalled(
"Content-type %r has been disabled." % (content_type, ))
content_type = content_type or 'application/data'
content_encoding = (content_encoding or 'utf-8').lower()
# Don't decode 8-bit strings or Unicode objects
if content_encoding not in ('binary', 'ascii-8bit') and \
not isinstance(data, unicode):
data = _decode(data, content_encoding)
try:
decoder = self._decoders[content_type]
except KeyError:
return data
if not data:
return data
return decoder(data)
"""
.. data:: registry
Global registry of serializers/deserializers.
"""
registry = SerializerRegistry()
"""
.. function:: encode(data, serializer=default_serializer)
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., `application/json`), content encoding, (e.g.,
`utf-8`) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
encode = registry.encode
"""
.. function:: decode(data, content_type, content_encoding):
Deserialize a data stream as serialized using `encode`
based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., `application/json`).
:param content_encoding: The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
:returns: The unserialized data.
"""
decode = registry.decode
"""
.. function:: register(name, encoder, decoder, content_type,
content_encoding="utf-8"):
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If :const:`None`, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If :const:`None`, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the `decoder` method will be returning. Will usually be
        `utf-8`, `us-ascii`, or `binary`.
"""
register = registry.register
"""
.. function:: unregister(name):
Unregister registered encoder/decoder.
:param name: Registered serialization method name.
"""
unregister = registry.unregister
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, unicode):
content_encoding = 'utf-8'
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from anyjson import serialize as json_serialize
from anyjson import deserialize as json_deserialize
registry.register('json', json_serialize, json_deserialize,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a yaml message, but yaml
isn't installed."""
raise SerializerNotInstalled(
"No decoder installed for YAML. Install the PyYAML library")
registry.register('yaml', None, not_available, 'application/x-yaml')
def register_pickle():
"""The fastest serialization method, but restricts
you to python clients."""
def deserialize(data):
return pickle.loads(str_to_bytes(data))
registry.register('pickle', pickle.dumps, deserialize,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""See http://msgpack.sourceforge.net/"""
try:
import msgpack
registry.register('msgpack', msgpack.packs, msgpack.unpacks,
content_type='application/x-msgpack',
content_encoding='binary')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a msgpack message, but yaml
isn't installed."""
raise SerializerNotInstalled(
"No decoder installed for msgpack. "
"Install the msgpack library")
registry.register('msgpack', None, not_available,
'application/x-msgpack')
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# JSON is assumed to always be available, so is the default.
# (this matches the historical use of kombu.)
registry._set_default_serializer('json')
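# Illustrative sketch (not part of the original module, Python 2 era): a JSON
# round trip through the global registry, then registering a made-up custom
# serializer. All "my_*" names are hypothetical.
if __name__ == "__main__":
    content_type, content_encoding, payload = encode({"hello": "world"})
    assert content_type == "application/json"
    assert decode(payload, content_type, content_encoding) == {"hello": "world"}
    def my_dumps(obj):
        # naive serializer for the sketch only
        return repr(obj)
    def my_loads(data):
        import ast
        return ast.literal_eval(data)
    register("myformat", my_dumps, my_loads,
             content_type="application/x-myformat",
             content_encoding="utf-8")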
|
softak/webfaction_demo
|
vendor-local/lib/python/kombu/serialization.py
|
Python
|
bsd-3-clause
| 11,002 | 0.000273 |
# -*- coding: utf-8 -*-
"""
BlueButtonDev.appmgmt
FILE: utils
Created: 12/2/15 8:09 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django.contrib import messages
from .models import DEVELOPER_ROLE_CHOICES
from django.conf import settings
def Choice_Display(role):
"""
Receive a string of the current role
Lookup in DEVELOPER_ROLE_CHOICES
Return the String
:param role:
:return:
"""
result = dict(DEVELOPER_ROLE_CHOICES).get(role)
if role == "None":
return
else:
return result
class MessageMixin(object):
"""
Make it easy to display notification messages when using Class Based Views.
"""
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(MessageMixin, self).delete(request, *args, **kwargs)
def form_valid(self, form):
messages.success(self.request, self.success_message)
return super(MessageMixin, self).form_valid(form)
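# Illustrative sketch (not part of the original module): MessageMixin is meant
# to sit before a Django class-based edit view; the view and model names below
# are made up, so the example is left commented out.
# from django.views.generic.edit import CreateView
# class WidgetCreateView(MessageMixin, CreateView):
#     model = Widget  # hypothetical model
#     fields = "__all__"
#     success_url = "/widgets/"
#     success_message = "Widget saved."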
|
ekivemark/BlueButtonDev
|
appmgmt/utils.py
|
Python
|
apache-2.0
| 1,004 | 0.002988 |
'''
Created on Jun 15, 2014
@author: geraldine
'''
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(),0x8915,struct.pack('256s', ifname[:15]))[20:24])
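# Illustrative sketch (not part of the original module): the SIOCGIFADDR ioctl
# used above is Linux-only and raises if the interface name does not exist.
if __name__ == "__main__":
    print(get_ip_address('eth0'))  # 'eth0' is an assumed interface name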
|
DeeDee22/nelliepi
|
src/ch/fluxkompensator/nelliepi/IPAddressFinder.py
|
Python
|
gpl-2.0
| 283 | 0.021201 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from openapi_server import util
class GithubRepositorypermissions(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, admin: bool=None, push: bool=None, pull: bool=None, _class: str=None): # noqa: E501
"""GithubRepositorypermissions - a model defined in Swagger
:param admin: The admin of this GithubRepositorypermissions. # noqa: E501
:type admin: bool
:param push: The push of this GithubRepositorypermissions. # noqa: E501
:type push: bool
:param pull: The pull of this GithubRepositorypermissions. # noqa: E501
:type pull: bool
:param _class: The _class of this GithubRepositorypermissions. # noqa: E501
:type _class: str
"""
self.swagger_types = {
'admin': bool,
'push': bool,
'pull': bool,
'_class': str
}
self.attribute_map = {
'admin': 'admin',
'push': 'push',
'pull': 'pull',
'_class': '_class'
}
self._admin = admin
self._push = push
self._pull = pull
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'GithubRepositorypermissions':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The GithubRepositorypermissions of this GithubRepositorypermissions. # noqa: E501
:rtype: GithubRepositorypermissions
"""
return util.deserialize_model(dikt, cls)
@property
def admin(self) -> bool:
"""Gets the admin of this GithubRepositorypermissions.
:return: The admin of this GithubRepositorypermissions.
:rtype: bool
"""
return self._admin
@admin.setter
def admin(self, admin: bool):
"""Sets the admin of this GithubRepositorypermissions.
:param admin: The admin of this GithubRepositorypermissions.
:type admin: bool
"""
self._admin = admin
@property
def push(self) -> bool:
"""Gets the push of this GithubRepositorypermissions.
:return: The push of this GithubRepositorypermissions.
:rtype: bool
"""
return self._push
@push.setter
def push(self, push: bool):
"""Sets the push of this GithubRepositorypermissions.
:param push: The push of this GithubRepositorypermissions.
:type push: bool
"""
self._push = push
@property
def pull(self) -> bool:
"""Gets the pull of this GithubRepositorypermissions.
:return: The pull of this GithubRepositorypermissions.
:rtype: bool
"""
return self._pull
@pull.setter
def pull(self, pull: bool):
"""Sets the pull of this GithubRepositorypermissions.
:param pull: The pull of this GithubRepositorypermissions.
:type pull: bool
"""
self._pull = pull
@property
def _class(self) -> str:
"""Gets the _class of this GithubRepositorypermissions.
:return: The _class of this GithubRepositorypermissions.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this GithubRepositorypermissions.
:param _class: The _class of this GithubRepositorypermissions.
:type _class: str
"""
self.__class = _class
|
cliffano/swaggy-jenkins
|
clients/python-blueplanet/generated/app/openapi_server/models/github_repositorypermissions.py
|
Python
|
mit
| 3,754 | 0.002397 |
# -*- coding: utf-8 -*-
class CrazyBoxError(Exception):
"""
The base class for custom exceptions raised by crazybox.
"""
pass
class DockerError(Exception):
"""
An error occurred with the underlying docker system.
"""
pass
|
USTB-LETTers/judger
|
exceptions.py
|
Python
|
mit
| 256 | 0 |
import datetime
from django.test import override_settings, TestCase
from django_twilio.models import Caller
from mock import Mock, patch, PropertyMock
from model_mommy import mommy
from twilio.rest.exceptions import TwilioRestException
from .mommy_recipes import caller_recipe, message_recipe, phone_number_recipe
from django_twilio_sms.models import (
Account,
Action,
ApiVersion,
Currency,
Error,
Message,
MessagingService,
PhoneNumber,
Response
)
class CommonTestCase(TestCase):
def string_test(self, model, test_value, **kwargs):
obj = mommy.make('django_twilio_sms.'+model, **kwargs)
self.assertEqual(test_value, obj.__str__())
class AccountModelTest(CommonTestCase):
def mock_account(self, owner_account_sid='test'):
return Mock(
friendly_name='test',
type='Full',
status='active',
sid='test',
owner_account_sid=owner_account_sid,
)
def test_unicode(self):
self.string_test('Account', 'abc', **{'sid': 'abc'})
def test_get_account_type_choice_account_type_display_equal_choice(self):
self.assertEqual(0, Account.get_account_type_choice('Trial'))
def test_get_account_type_choice_account_type_display_not_equal_choice(
self):
self.assertEqual(None, Account.get_account_type_choice('test'))
def test_get_status_choice_status_display_equal_choice(self):
self.assertEqual(0, Account.get_status_choice('active'))
def test_get_status_choice_status_display_not_equal_choice(self):
self.assertEqual(None, Account.get_status_choice('test'))
def test_get_or_create_if_not_account_sid_no_exception(self):
account_1 = mommy.make(Account, sid='test')
account_2 = Account.get_or_create(account=self.mock_account())
self.assertEqual(1, Account.objects.all().count())
self.assertEqual(account_1, account_2)
def test_get_or_create_if_account_sid_no_exception(self):
account_1 = mommy.make(Account, sid='test')
account_2 = Account.get_or_create(account_sid='test')
self.assertEqual(1, Account.objects.all().count())
self.assertEqual(account_1, account_2)
def test_get_or_create_if_not_account_sid_with_exception(self):
Account.get_or_create(account=self.mock_account())
self.assertEqual(1, Account.objects.all().count())
self.assertEqual('test', Account.objects.first().sid)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_get_or_create_if_account_sid_with_exception(self, twilio_account):
twilio_account.return_value = self.mock_account()
Account.get_or_create(account_sid='test')
self.assertEqual(1, Account.objects.all().count())
self.assertEqual('test', Account.objects.first().sid)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_twilio_account(self, twilio_account):
mock_account = self.mock_account()
twilio_account.return_value = mock_account
self.assertEqual(mock_account, Account.twilio_account)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_sync_twilio_account_if_not_account_sids_not_equal(
self, twilio_account):
twilio_account.return_value = self.mock_account()
account = mommy.make(Account, sid='test')
account.sync_twilio_account()
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(None, account.owner_account_sid)
def test_sync_twilio_account_if_account_sids_not_equal(self):
account = mommy.make(Account, sid='test')
account.sync_twilio_account(self.mock_account())
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(None, account.owner_account_sid)
def test_sync_twilio_account_if_account_sids_equal(self):
owner_account = mommy.make(Account, sid='ownertest')
account = mommy.make(Account, sid='test')
account.sync_twilio_account(self.mock_account('ownertest'))
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(owner_account, account.owner_account_sid)
class ApiVersionModelTest(CommonTestCase):
def test_unicode(self):
api_version = mommy.make(ApiVersion)
self.assertEqual(
'{}'.format(api_version.date), api_version.__str__()
)
def test_get_or_create_created_false(self):
api_version = mommy.make(ApiVersion)
self.assertEqual(
api_version, ApiVersion.get_or_create(api_version.date)
)
self.assertEqual(1, ApiVersion.objects.all().count())
def test_get_or_create_created_true(self):
date = datetime.date(2016, 1, 1)
api_version = ApiVersion.get_or_create(date)
self.assertEqual(date, api_version.date)
self.assertEqual(1, ApiVersion.objects.all().count())
class CurrencyModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Currency', 'abc', **{'code': 'abc'})
def test_get_or_create_created_false(self):
currency = mommy.make(Currency)
self.assertEqual(currency, Currency.get_or_create(currency.code))
self.assertEqual(1, Currency.objects.all().count())
def test_get_or_create_created_true(self):
currency_code = 'USD'
currency = Currency.get_or_create(currency_code)
self.assertEqual(currency_code, currency.code)
self.assertEqual(1, Currency.objects.all().count())
class ErrorModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Error', 'abc', **{'code': 'abc'})
def test_get_or_create_created_false(self):
error = mommy.make(Error)
self.assertEqual(error, Error.get_or_create(error.code, error.message))
self.assertEqual(1, Error.objects.all().count())
    def test_get_or_create_created_true(self):
error_code = '10015'
error_message = 'test'
error = Error.get_or_create(error_code, error_message)
self.assertEqual(error_code, error.code)
self.assertEqual(error_message, error.message)
self.assertEqual(1, Error.objects.all().count())
class MessageServiceModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('MessagingService', 'abc', **{'sid': 'abc'})
def test_get_or_create_created_false(self):
messaging_service = mommy.make(MessagingService)
self.assertEqual(
messaging_service, MessagingService.get_or_create(
messaging_service.sid
)
)
self.assertEqual(1, MessagingService.objects.all().count())
def test_get_or_create_created_true(self):
sid = 'test'
messaging_service = MessagingService.get_or_create(sid)
self.assertEqual(sid, messaging_service.sid)
self.assertEqual(1, MessagingService.objects.all().count())
class PhoneNumberModelTest(CommonTestCase):
def test_unicode(self):
caller = caller_recipe.make()
self.string_test(
'PhoneNumber', '+19999999991', **{'caller': caller}
)
def test_get_or_create_is_instance(self):
phone_number = phone_number_recipe.make()
self.assertEqual(phone_number, PhoneNumber.get_or_create(phone_number))
def test_get_or_create_caller_created_false_phone_number_created_false(
self):
caller = caller_recipe.make()
phone_number = phone_number_recipe.make(caller=caller)
self.assertEqual(
phone_number, PhoneNumber.get_or_create(caller.phone_number)
)
self.assertEqual(1, Caller.objects.all().count())
self.assertEqual(1, PhoneNumber.objects.all().count())
def test_get_or_create_caller_created_true_phone_number_created_true(self):
number = '+19999999999'
phone_number = PhoneNumber.get_or_create(number)
self.assertEqual(number, phone_number.caller.phone_number)
self.assertEqual(1, Caller.objects.all().count())
self.assertEqual(1, PhoneNumber.objects.all().count())
def test_as_164(self):
phone_number = phone_number_recipe.make()
self.assertEqual('+19999999991', phone_number.as_e164)
def test_subscribe(self):
phone_number = phone_number_recipe.make(unsubscribed=True)
phone_number.subscribe()
self.assertFalse(phone_number.unsubscribed)
def test_unsubscribe(self):
phone_number = phone_number_recipe.make(unsubscribed=False)
phone_number.unsubscribe()
self.assertTrue(phone_number.unsubscribed)
class MessageModelTest(CommonTestCase):
def mock_message(
self, messaging_service_sid=None, error=None, direction='inbound',
status='accepted', price='-0.00750'):
account = mommy.make(Account, sid='testaccount')
if error:
error_code = error.code
error_message = error.message
else:
error_code = None
error_message = None
return Mock(
sid='test',
date_sent=datetime.date(2016, 1, 1),
account_sid=account.sid,
messaging_service_sid=messaging_service_sid,
body='test',
num_media=0,
num_segments=1,
status=status,
error_code=error_code,
error_message=error_message,
direction=direction,
price=price,
price_unit='USD',
api_version='2016-01-01',
from_='+19999999991',
to='+19999999992',
)
def test_unicode(self):
self.string_test(
'Message', '123', **{
'sid': '123',
'from_phone_number': phone_number_recipe.make(),
'to_phone_number': phone_number_recipe.make(),
}
)
def test_get_direction_choice_direction_display_equal_choice(self):
self.assertEqual(
Message.INBOUND, Message.get_direction_choice('inbound')
)
def test_get_direction_choice_direction_display_not_equal_choice(self):
self.assertEqual(None, Message.get_direction_choice('test'))
def test_get_status_choice_status_display_equal_choice(self):
self.assertEqual(
Message.ACCEPTED, Message.get_status_choice('accepted')
)
def test_get_status_choice_status_display_not_equal_choice(self):
self.assertEqual(None, Message.get_status_choice('test'))
def test_get_or_create_if_not_message_sid_no_exception(self):
message_1 = message_recipe.make(sid='test')
message_2, created = Message.get_or_create(message=Mock(sid='test'))
self.assertFalse(created)
self.assertEqual(message_1, message_2)
self.assertEqual(1, Message.objects.all().count())
def test_get_or_create_if_message_sid_no_exception(self):
message_1 = message_recipe.make(sid='test')
message_2, created = Message.get_or_create(message_sid='test')
self.assertFalse(created)
self.assertEqual(message_1, message_2)
self.assertEqual(1, Message.objects.all().count())
def test_get_or_create_if_not_message_sid_with_exception(self):
message, created = Message.get_or_create(message=self.mock_message())
self.assertTrue(created)
self.assertEqual(message, Message.objects.first())
self.assertEqual(1, Message.objects.all().count())
@patch(
'django_twilio_sms.models.Message.twilio_message',
new_callable=PropertyMock
)
def test_get_or_create_if_message_sid_with_exception(self, twilio_message):
twilio_message.return_value = self.mock_message()
message, created = Message.get_or_create('test')
self.assertTrue(created)
self.assertEqual(message, Message.objects.first())
self.assertEqual(1, Message.objects.all().count())
@patch('django_twilio_sms.models.Message.get_status_callback')
@patch('django_twilio_sms.models.twilio_client')
def test_send_message(self, mock_client, mock_status_callback):
mock_client.messages.create.return_value = self.mock_message()
mock_status_callback.return_value = 'test'
message, created = Message.send_message(
body='test', to='+19999999992', from_='+19999999991'
)
self.assertIsInstance(message, Message)
self.assertTrue(mock_status_callback.called)
mock_client.messages.create.assert_called_with(
body='test',
to='+19999999992',
from_='+19999999991',
status_callback='test'
)
@patch('django_twilio_sms.models.twilio_client')
def test_twilio_message_no_exception(self, mock_client):
mock_message = self.mock_message()
mock_client.messages.get.return_value = mock_message
message = message_recipe.make(sid='test')
self.assertEqual(mock_message, message.twilio_message)
self.assertEqual(1, mock_client.messages.get.call_count)
@override_settings(DJANGO_TWILIO_SMS_MAX_RETRIES=2)
@override_settings(DJANGO_TWILIO_SMS_RETRY_SLEEP=.001)
@patch('django_twilio_sms.models.twilio_client')
def test_twilio_message_with_exception_less_than_five(self, mock_client):
mock_client.messages.get.side_effect = TwilioRestException(
status='test', method='test', uri='test', msg='test', code='test'
)
message = message_recipe.make(sid='test')
with self.assertRaises(TwilioRestException):
message.twilio_message
self.assertEqual(3, mock_client.messages.get.call_count)
@override_settings(DJANGO_TWILIO_SMS_SITE_HOST='www.test.com')
@override_settings(SECURE_SSL_REDIRECT=True)
def test_get_status_callback(self):
self.assertEqual(
Message.get_status_callback(),
'https://www.test.com/twilio-integration/webhooks/callback-view/'
)
@patch('django_twilio_sms.models.unsubscribe_signal')
def test_check_for_subscription_message_if_direction_is_not_inbound(
self, unsubscribe_signal):
from_phone_number = phone_number_recipe.make(unsubscribed=False)
message = message_recipe.make(
body='STOP',
from_phone_number=from_phone_number,
direction=Message.OUTBOUND_API
)
message.check_for_subscription_message()
self.assertFalse(message.from_phone_number.unsubscribed)
unsubscribe_signal.send_robust.assert_not_called()
@patch('django_twilio_sms.models.unsubscribe_signal')
def test_check_for_subscription_message_if_body_in_unsubscribe(
self, unsubscribe_signal):
from_phone_number = phone_number_recipe.make(unsubscribed=False)
message = message_recipe.make(
body='STOP',
from_phone_number=from_phone_number,
direction=Message.INBOUND
)
message.check_for_subscription_message()
self.assertTrue(message.from_phone_number.unsubscribed)
unsubscribe_signal.send_robust.assert_called_once_with(
sender=Message, message=message, unsubscribed=True
)
def test_check_for_subscription_message_if_body_in_subscribe(self):
from_phone_number = phone_number_recipe.make(unsubscribed=True)
message = message_recipe.make(
body='START',
from_phone_number=from_phone_number,
direction=Message.INBOUND
)
message.check_for_subscription_message()
self.assertFalse(message.from_phone_number.unsubscribed)
@patch('django_twilio_sms.models.response_message')
@patch('django_twilio_sms.models.Message.send_message')
def test_send_response_message_if_direction_is_inbound_not_unsubscribed(
self, send_message, response_message):
action = mommy.make(Action, name='STOP')
mommy.make(Response, body='test', action=action)
to_phone_number = phone_number_recipe.make()
from_phone_number = phone_number_recipe.make(unsubscribed=False)
message = message_recipe.make(
body='STOP',
direction=Message.INBOUND,
to_phone_number=to_phone_number,
from_phone_number=from_phone_number
)
message.send_response_message()
send_message.assert_called_with(
body='test',
to=from_phone_number,
from_=to_phone_number
)
response_message.send_robust.assert_called_with(
sender=Message, action=action, message=message
)
@patch('django_twilio_sms.models.response_message')
@patch('django_twilio_sms.models.Message.send_message')
def test_send_response_message_if_direction_is_inbound_is_unsubscribed(
self, send_message, response_message):
action = mommy.make(Action, name='STOP')
mommy.make(Response, body='test', action=action)
to_phone_number = phone_number_recipe.make()
from_phone_number = phone_number_recipe.make(unsubscribed=True)
message = message_recipe.make(
body='STOP',
direction=Message.INBOUND,
to_phone_number=to_phone_number,
from_phone_number=from_phone_number
)
message.send_response_message()
send_message.assert_not_called()
response_message.assert_not_called()
@patch('django_twilio_sms.models.response_message')
@patch('django_twilio_sms.models.Message.send_message')
def test_send_response_message_if_direction_not_inbound(
self, send_message, response_message):
action = mommy.make(Action, name='STOP')
mommy.make(Response, body='test', action=action)
to_phone_number = phone_number_recipe.make()
from_phone_number = phone_number_recipe.make()
message = message_recipe.make(
body='STOP',
direction=Message.OUTBOUND_API,
to_phone_number=to_phone_number,
from_phone_number=from_phone_number
)
message.send_response_message()
send_message.assert_not_called()
response_message.assert_not_called()
@patch('django_twilio_sms.models.Message.check_for_subscription_message')
def test_sync_twilio_message_if_message(
self, check_for_subscription_message):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message())
self.assertEqual(datetime.date(2016, 1, 1), message.date_sent)
self.assertEqual('testaccount', message.account.sid)
self.assertEqual('test', message.body)
self.assertEqual(0, message.num_media)
self.assertEqual(1, message.num_segments)
self.assertEqual(Message.ACCEPTED, message.status)
self.assertEqual(Message.INBOUND, message.direction)
self.assertEqual('-0.00750', message.price)
self.assertEqual('USD', message.currency.code)
self.assertEqual('2016-01-01', message.api_version.date)
self.assertEqual(
'+19999999991', message.from_phone_number.caller.phone_number
)
self.assertEqual(
'+19999999992', message.to_phone_number.caller.phone_number
)
check_for_subscription_message.assert_called_once()
@patch('django_twilio_sms.models.Message.check_for_subscription_message')
@patch(
'django_twilio_sms.models.Message.twilio_message',
new_callable=PropertyMock
)
def test_sync_twilio_message_if_not_message(
self, twilio_message, check_for_subscription_message):
twilio_message.return_value = self.mock_message()
message = message_recipe.make()
message.sync_twilio_message()
self.assertEqual(datetime.date(2016, 1, 1), message.date_sent)
self.assertEqual('testaccount', message.account.sid)
self.assertEqual('test', message.body)
self.assertEqual(0, message.num_media)
self.assertEqual(1, message.num_segments)
self.assertEqual(Message.ACCEPTED, message.status)
self.assertEqual(Message.INBOUND, message.direction)
self.assertEqual('-0.00750', message.price)
self.assertEqual('USD', message.currency.code)
self.assertEqual('2016-01-01', message.api_version.date)
self.assertEqual(
'+19999999991', message.from_phone_number.caller.phone_number
)
self.assertEqual(
'+19999999992', message.to_phone_number.caller.phone_number
)
check_for_subscription_message.assert_called_once()
def test_sync_twilio_message_if_message_service_sid(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message(
messaging_service_sid='test')
)
self.assertEqual(1, MessagingService.objects.all().count())
self.assertEqual(
message.messaging_service, MessagingService.objects.first()
)
def test_sync_twilio_message_if_not_message_service_sid(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message())
self.assertEqual(0, MessagingService.objects.all().count())
self.assertEqual(message.messaging_service, None)
def test_sync_twilio_message_if_status(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message())
self.assertEqual(0, message.status)
def test_sync_twilio_message_if_not_status(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message(status=None))
self.assertEqual(9, message.status)
def test_sync_twilio_message_if_error_code(self):
message = message_recipe.make()
error = mommy.make(Error)
message.sync_twilio_message(self.mock_message(error=error))
self.assertEqual(error, message.error)
def test_sync_twilio_message_if_not_error_code(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message())
self.assertEqual(0, Error.objects.all().count())
self.assertEqual(None, message.error)
def test_sync_twilio_message_if_price(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message())
self.assertEqual('-0.00750', message.price)
def test_sync_twilio_message_if_not_price(self):
message = message_recipe.make()
message.sync_twilio_message(self.mock_message(price=None))
self.assertEqual('0.0', message.price)
class ActionModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Action', 'ABC', **{'name': 'ABC'})
def test_get_action_no_exception(self):
action = mommy.make(Action, name='TEST')
self.assertEqual(action, Action.get_action('test'))
def test_get_action_with_exception(self):
action = mommy.make(Action, name='UNKNOWN')
self.assertEqual(action, Action.get_action('test'))
def test_get_active_response(self):
action = mommy.make(Action)
responses = mommy.make(Response, active=False, _quantity=3)
responses[2].active = True
responses[2].action = action
responses[2].save()
self.assertEqual(responses[2], action.get_active_response())
def test_save(self):
action = Action(name='test')
action.save()
self.assertEqual('TEST', action.name)
class ResponseModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Response', 'Response for ABC', **{
'action': mommy.make('django_twilio_sms.Action', name='ABC')
})
def test_if_active_no_exception_self_not_equal_current(self):
action = mommy.make(Action)
mommy.make(Response, action=action, active=True)
response = mommy.make(Response, action=action, active=False)
response.active = True
response.save()
responses = Response.objects.filter(action=action, active=True)
self.assertEqual(
1, responses.count()
)
self.assertEqual(response, responses[0])
def test_if_active_no_exception_self_equal_current(self):
response = mommy.make(Response, active=True)
response.body = 'test'
response.save()
responses = Response.objects.filter(active=True)
self.assertEqual(
1, responses.count()
)
self.assertEqual(response, responses[0])
def test_if_active_with_exception(self):
actions = mommy.make(Action, _quantity=2)
mommy.make(Response, action=actions[1], active=True)
response = Response(body='test', action=actions[0], active=True)
response.save()
responses = Response.objects.filter(action=actions[0], active=True)
self.assertEqual(
1, responses.count()
)
self.assertEqual(response, responses[0])
def test_if_not_active(self):
action = mommy.make(Action)
response = Response(body='test', action=action, active=False)
response.save()
self.assertEqual(1, Response.objects.all().count())
self.assertEqual(response, Response.objects.first())
|
cfc603/django-twilio-sms-models
|
tests/test_models.py
|
Python
|
bsd-3-clause
| 25,815 | 0 |
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering
from sklearn.externals.joblib import Memory
from .clustering import Clustering
class AgglomerativeClustering(Clustering):
"""docstring for AgglomerativeClustering."""
def __init__(self, data, n_clusters = 2, affinity = 'euclidean',
memory = Memory(cachedir = None), connectivity = None,
compute_full_tree = 'auto', linkage = 'ward',
pooling_func = np.mean):
super(AgglomerativeClustering, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.affinity = affinity
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.pooling_func = pooling_func
def execute(self):
"""Constroi o modelo de clusterizacao."""
self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,
affinity = self.affinity,
memory = self.memory,
connectivity = self.connectivity,
compute_full_tree = self.compute_full_tree,
linkage = self.linkage,
pooling_func = self.pooling_func).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model
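# Illustrative usage sketch (added for clarity, not part of the original
# module). It relies only on the wrapper defined above; the toy dataset and
# the cluster count are made-up values.
#
#     import numpy as np
#     from clustering.agglomerative_clustering import AgglomerativeClustering
#
#     X = np.array([[1.0, 2.0], [1.1, 2.1], [8.0, 9.0], [8.2, 9.1]])
#     ac = AgglomerativeClustering(X, n_clusters=2, linkage='ward')
#     ac.execute()          # fits the scikit-learn estimator on X
#     print(ac.labels_)     # one cluster label per row of X
#     print(ac.clusters_)   # dict mapping cluster index -> member elements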
|
netoaraujjo/hal
|
clustering/agglomerative_clustering.py
|
Python
|
mit
| 1,946 | 0.023124 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 15:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('silo', '0028_auto_20170913_0206'),
]
operations = [
migrations.AlterField(
model_name='silo',
name='workflowlevel1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
model_name='tolauser',
name='workflowlevel1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
model_name='workflowlevel2',
name='activity_id',
field=models.IntegerField(blank=True, null=True, verbose_name=b'ID'),
),
migrations.AlterField(
model_name='workflowlevel2',
name='workflowlevel1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
]
|
toladata/TolaTables
|
silo/migrations/0029_auto_20170915_0810.py
|
Python
|
gpl-2.0
| 1,233 | 0.003244 |
from os import path
rosalind_id = path.basename(__file__).split('.').pop(0)
dataset = "../datasets/rosalind_{}.txt".format(rosalind_id)
data = open(dataset, 'r').read().splitlines()
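# Note added for clarity (not in the original): after this boilerplate runs,
# `data` holds the problem input as a list of lines with newlines stripped,
# e.g. data[0] is the first line of ../datasets/rosalind_<problem id>.txt.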
|
damonmcminn/rosalind
|
boilerplate.py
|
Python
|
gpl-2.0
| 183 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Category
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
class CategoryLinksConfigForm(GenericPluginForm):
"""
A configuration form for the CategoryLinksPlugin
"""
def populate(self):
"""
A custom populate method to display category choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
self.fields["categories"] = forms.ModelMultipleChoiceField(
queryset=Category.objects.all_visible(customer=None),
required=False,
initial=self.plugin.config.get("categories", None),
)
def clean(self):
"""
A custom clean method to save category configuration information in a serializable form
"""
cleaned_data = super(CategoryLinksConfigForm, self).clean()
categories = cleaned_data.get("categories", [])
cleaned_data["categories"] = [category.pk for category in categories if hasattr(category, "pk")]
return cleaned_data
class CategoryLinksPlugin(TemplatedPlugin):
"""
A plugin for displaying links to visible categories on the shop front
"""
identifier = "category_links"
name = _("Category Links")
template_name = "shuup/xtheme/plugins/category_links.jinja"
editor_form_class = CategoryLinksConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("show_all_categories", forms.BooleanField(
label=_("Show all categories"),
required=False,
initial=True,
help_text=_("All categories are shown, even if not selected"),
)),
"categories",
]
def get_context_data(self, context):
"""
A custom get_context_data method to return only visible categories
for request customer.
"""
selected_categories = self.config.get("categories", [])
show_all_categories = self.config.get("show_all_categories", True)
request = context.get("request")
categories = Category.objects.all_visible(
customer=getattr(request, "customer"),
shop=getattr(request, "shop")
)
if not show_all_categories:
categories = categories.filter(id__in=selected_categories)
return {
"title": self.get_translated_value("title"),
"categories": categories,
}
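# Illustrative sketch (added for clarity, not part of the original module),
# assuming the xtheme Plugin base class takes its config dict at construction
# and exposes it as self.config; the title text and category pks are made-up.
#
#     plugin = CategoryLinksPlugin(config={
#         "title": "Browse categories",   # stored per-language by TranslatableField in practice
#         "show_all_categories": False,
#         "categories": [3, 7, 12],       # pks serialized by CategoryLinksConfigForm.clean()
#     })
#     context = plugin.get_context_data({"request": request})
#     # context["categories"] now holds only the visible categories with those
#     # pks for the request's customer and shop.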
|
shawnadelic/shuup
|
shuup/xtheme/plugins/category_links.py
|
Python
|
agpl-3.0
| 2,994 | 0.001002 |
import json
import os
import re
from django import http
from django.conf import settings
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import commonware.log
import waffle
from django_statsd.clients import statsd
from olympia import amo, api
from olympia.amo.utils import log_cef
from . import monitors
log = commonware.log.getLogger('z.amo')
monitor_log = commonware.log.getLogger('z.monitor')
jp_log = commonware.log.getLogger('z.jp.repack')
flash_re = re.compile(r'^(Win|(PPC|Intel) Mac OS X|Linux.+i\d86)|SunOs',
re.IGNORECASE)
quicktime_re = re.compile(
r'^(application/(sdp|x-(mpeg|rtsp|sdp))|audio/(3gpp(2)?|AMR|aiff|basic|'
r'mid(i)?|mp4|mpeg|vnd\.qcelp|wav|x-(aiff|m4(a|b|p)|midi|mpeg|wav))|'
r'image/(pict|png|tiff|x-(macpaint|pict|png|quicktime|sgi|targa|tiff))|'
r'video/(3gpp(2)?|flc|mp4|mpeg|quicktime|sd-video|x-mpeg))$')
java_re = re.compile(
r'^application/x-java-((applet|bean)(;jpi-version=1\.5|;'
r'version=(1\.(1(\.[1-3])?|(2|4)(\.[1-2])?|3(\.1)?|5)))?|vm)$')
wmp_re = re.compile(
r'^(application/(asx|x-(mplayer2|ms-wmp))|video/x-ms-(asf(-plugin)?|'
r'wm(p|v|x)?|wvx)|audio/x-ms-w(ax|ma))$')
@never_cache
@non_atomic_requests
def monitor(request, format=None):
# For each check, a boolean pass/fail status to show in the template
status_summary = {}
results = {}
checks = ['memcache', 'libraries', 'elastic', 'path',
'redis']
for check in checks:
with statsd.timer('monitor.%s' % check) as timer:
status, result = getattr(monitors, check)()
# state is a string. If it is empty, that means everything is fine.
status_summary[check] = {'state': not status,
'status': status}
results['%s_results' % check] = result
results['%s_timer' % check] = timer.ms
# If anything broke, send HTTP 500.
status_code = 200 if all(a['state']
for a in status_summary.values()) else 500
if format == '.json':
return http.HttpResponse(json.dumps(status_summary),
status=status_code)
ctx = {}
ctx.update(results)
ctx['status_summary'] = status_summary
return render(request, 'services/monitor.html', ctx, status=status_code)
@non_atomic_requests
def robots(request):
"""Generate a robots.txt"""
_service = (request.META['SERVER_NAME'] == settings.SERVICES_DOMAIN)
if _service or not settings.ENGAGE_ROBOTS:
template = "User-agent: *\nDisallow: /"
else:
template = render(request, 'amo/robots.html', {'apps': amo.APP_USAGE})
return HttpResponse(template, content_type="text/plain")
@non_atomic_requests
def contribute(request):
path = os.path.join(settings.ROOT, 'contribute.json')
return HttpResponse(open(path, 'rb'), content_type='application/json')
@non_atomic_requests
def handler403(request):
if request.path_info.startswith('/api/'):
# Pass over to handler403 view in api if api was targeted.
return api.views.handler403(request)
else:
return render(request, 'amo/403.html', status=403)
@non_atomic_requests
def handler404(request):
if request.path_info.startswith('/api/'):
# Pass over to handler404 view in api if api was targeted.
return api.views.handler404(request)
else:
return render(request, 'amo/404.html', status=404)
@non_atomic_requests
def handler500(request):
if request.path_info.startswith('/api/'):
# Pass over to handler500 view in api if api was targeted.
return api.views.handler500(request)
else:
return render(request, 'amo/500.html', status=500)
@non_atomic_requests
def csrf_failure(request, reason=''):
return render(request, 'amo/403.html',
{'because_csrf': 'CSRF' in reason}, status=403)
@non_atomic_requests
def loaded(request):
return http.HttpResponse('%s' % request.META['wsgi.loaded'],
content_type='text/plain')
@csrf_exempt
@require_POST
@non_atomic_requests
def cspreport(request):
"""Accept CSP reports and log them."""
report = ('blocked-uri', 'violated-directive', 'original-policy')
if not waffle.sample_is_active('csp-store-reports'):
return HttpResponse()
try:
v = json.loads(request.body)['csp-report']
# If possible, alter the PATH_INFO to contain the request of the page
# the error occurred on, spec: http://mzl.la/P82R5y
meta = request.META.copy()
meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
v = [(k, v[k]) for k in report if k in v]
log_cef('CSPViolation', 5, meta, username=request.user,
signature='CSPREPORT',
msg='A client reported a CSP violation',
cs6=v, cs6Label='ContentPolicy')
except (KeyError, ValueError), e:
log.debug('Exception in CSP report: %s' % e, exc_info=True)
return HttpResponseBadRequest()
return HttpResponse()
@non_atomic_requests
def version(request):
path = os.path.join(settings.ROOT, 'version.json')
return HttpResponse(open(path, 'rb'), content_type='application/json')
@non_atomic_requests
def plugin_check_redirect(request):
return http.HttpResponseRedirect('%s?%s' % (
settings.PFS_URL, iri_to_uri(request.META.get('QUERY_STRING', ''))))
|
jpetto/olympia
|
src/olympia/amo/views.py
|
Python
|
bsd-3-clause
| 5,726 | 0 |
# -*- coding: UTF-8 -*-
## Copyright 2009-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
This creates a list `config_dirs` of all
configuration directories by looping through :setting:`INSTALLED_APPS`
and taking those whose source directory has a :xfile:`config` subdir.
..
DO NOT import this module at the global level of a models module
because importing it will fill the config dirs, i.e. will try to import
every installed `models` module.
The mechanism in this module emulates the behaviour of Django's
(or Jinja's) template loaders.
It was written before I discovered Jinja and became less used afterwards.
But we still need it to find the `.odt` files for
:class:`AppyBuildMethod <lino.mixins.printable.AppyBuildMethod>`.
This task cannot be done using Jinja because
Jinja's `get_template` method returns a `Template`,
and Jinja templates don't know their filename,
the only thing needed by
:class:`AppyBuildMethod <lino.mixins.printable.AppyBuildMethod>`.
One possibility might be to write a special Jinja Template class...
The order in :setting:`INSTALLED_APPS` should be: first
`django.contrib.*`, then ``lino``, then `lino.modlib.*`
and then `lino.projects.pcsw`.
That is, from the more general to the more specific. The config dirs are then
scanned in the reverse order of that list (and the search stops at the
first match): first the optional local `config_dir`,
then `lino.projects.pcsw`, then the various `lino.modlib.*` and so on.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import os
from os.path import join, abspath, dirname, normpath, isdir
import sys
import codecs
import glob
from fnmatch import fnmatch
from django.utils.importlib import import_module
from django.conf import settings
from lino import ad
from lino.utils import iif
SUBDIR_NAME = 'config' # we might change this to "templates"
class ConfigDir:
"""
A configuration directory is a directory that may contain configuration files.
"""
def __init__(self,name,writeable):
self.name = os.path.abspath(name)
self.writeable = writeable
def __repr__(self):
return "ConfigDir %s" % self.name + iif(self.writeable," (writeable)","")
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
config_dirs = []
for pth in settings.SITE.get_settings_subdirs(SUBDIR_NAME):
config_dirs.append(ConfigDir(pth.decode(fs_encoding),False))
def add_config_dir(name,mod):
pth = join(dirname(mod.__file__),SUBDIR_NAME)
if isdir(pth):
config_dirs.append(ConfigDir(pth.decode(fs_encoding),False))
settings.SITE.for_each_app(add_config_dir)
#~ for app in settings.INSTALLED_APPS:
#~ app_mod = import_module(app)
#~ app = getattr(app_mod,'App',None)
#~ if isinstance(app,ad.App) and app.extends:
#~ parent = import_module(app.extends)
#~ add_config_dir(parent)
#~ add_config_dir(app_mod)
LOCAL_CONFIG_DIR = None
#~ if settings.SITE.project_dir != settings.SITE.source_dir:
if settings.SITE.is_local_project_dir:
p = join(settings.SITE.project_dir,SUBDIR_NAME)
if isdir(p):
LOCAL_CONFIG_DIR = ConfigDir(p,True)
config_dirs.append(LOCAL_CONFIG_DIR)
config_dirs.reverse()
config_dirs = tuple(config_dirs)
#~ logger.debug('config_dirs:\n%s', '\n'.join([repr(cd) for cd in config_dirs]))
#~ for app_name in settings.INSTALLED_APPS:
#~ app = import_module(app_name)
#~ fn = getattr(app,'__file__',None)
#~ if fn is not None:
#~ pth = join(dirname(fn),'config')
#~ if isdir(pth):
#~ config_dirs.append(ConfigDir(pth,False))
#~ LOCAL_CONFIG_DIR = ConfigDir(join(settings.PROJECT_DIR,'config'),True)
#~ config_dirs.append(LOCAL_CONFIG_DIR)
def find_config_file(fn,group=''):
if os.path.isabs(fn):
return fn
if group:
prefix = join(*(group.split('/')))
else:
prefix = ''
for cd in config_dirs:
ffn = join(cd.name,prefix,fn)
if os.path.exists(ffn):
return ffn
def find_config_files(pattern,group=''):
"""
Returns a dict of filename -> config_dir entries for
each config file on this site that matches the pattern.
Loops through `config_dirs` and collects matching files.
When more than one file of the same name exists in different
applications it gets overridden by later apps.
`group` is e.g. '','foo', 'foo/bar',...
"""
if group:
prefix = os.path.sep + join(*(group.split('/')))
#~ if not group.endswith('/'):
#~ group += '/'
else:
prefix = ''
files = {}
for cd in config_dirs:
pth = cd.name + prefix
#~ print 'find_config_files() discover', pth, pattern
if isdir(pth):
for fn in os.listdir(pth):
if fnmatch(fn,pattern):
files.setdefault(fn,cd)
#~ if not files.has_key(fn):
#~ files[fn] = cd
#~ else:
#~ print 'find_config_files() not a directory:', pth
return files
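# Usage sketch (added for illustration; the pattern and group are hypothetical):
#
#     find_config_files('*.odt', 'foo')
#
# walks `config_dirs` (already reversed above, so the local project config dir
# and the most specific apps come first) and returns a dict such as
# {'letter.odt': <ConfigDir ...>}; because of setdefault(), the first -- i.e.
# most specific -- directory containing a given file name wins.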
def find_template_config_files(template_ext,templates_group):
"""
find_config_files and ignore babel variants:
e.g. ignore "foo_fr.html" if "foo.html" exists
but don't ignore "my_template.html"
"""
files = find_config_files('*' + template_ext,templates_group)
l = []
for name in files.keys():
basename = name[:-len(template_ext)]
chunks = basename.split('_')
if len(chunks) > 1:
basename = '_'.join(chunks[:-1])
if files.has_key(basename + template_ext):
continue
l.append(name)
l.sort()
if not l:
logger.warning("email_template_choices() : no matches for (%r,%r)",
'*' + template_ext,templates_group)
return l
def load_config_files(loader,pattern,group=''):
"""
Naming conventions for :xfile:`*.dtl` files are:
- the first detail is called appname.Model.dtl
- If there are more Details, then they are called
appname.Model.2.dtl, appname.Model.3.dtl etc.
The `sort()` below must remove the filename extension (".dtl")
    because otherwise the first Detail would come last.
"""
files = find_config_files(pattern,group).items()
def fcmp(a,b):
return cmp(a[0][:-4],b[0][:-4])
files.sort(fcmp)
prefix = group.replace("/",os.sep)
for filename,cd in files:
filename = join(prefix,filename)
ffn = join(cd.name,filename)
logger.debug("Loading %s...",ffn)
s = codecs.open(ffn,encoding='utf-8').read()
loader(s,cd,filename)
class Configured(object):
#~ filename = None
#~ cd = None # ConfigDir
def __init__(self,filename=None,cd=None):
if filename is not None:
assert not os.pardir in filename
#~ assert not os.sep in filename
if cd is None:
cd = LOCAL_CONFIG_DIR
self.filename = filename
self.cd = cd
self.messages = set()
def save_config(self):
if not self.filename:
raise IOError('Cannot save unnamed %s' % self)
if self.cd is None:
raise IOError("Cannot save because there is no local config directory")
if not self.cd.writeable:
#~ print self.cd, "is not writable", self.filename
self.cd = LOCAL_CONFIG_DIR
fn = join(self.cd.name,self.filename)
pth = dirname(fn)
settings.SITE.makedirs_if_missing(pth)
#~ if not os.path.exists(pth):
#~ os.makedirs(pth)
f = codecs.open(fn,'w',encoding='utf-8')
self.write_content(f)
f.close()
msg = "%s has been saved to %s" % (self.__class__.__name__,fn)
logger.info(msg)
return msg
def make_dummy_messages_file(self):
"""
Make dummy messages file for this Configurable.
Calls the global :func:`make_dummy_messages_file`
"""
if not self.filename: return
if self.cd is None: return
fn = join(self.cd.name,self.filename)
if self.cd.writeable:
logger.info("Not writing %s because %s is writeable",
self.filename,self.cd.name)
return
#~ if self.messages:
"""
if there are no messages, we still write a
        new file to remove messages from previous versions.
"""
make_dummy_messages_file(fn,self.messages)
def add_dummy_message(self,msg):
self.messages.add(msg)
def write_content(self,f):
raise NotImplementedError
def __str__(self):
if self.filename:
return u"%s (from %s)" % (self.filename,self.cd)
return "Dynamic " + super(Configured,self).__str__()
# "%s(%r)" % (self.__class__.__name__,self._desc)
IGNORE_TIMES = False
MODIFY_WINDOW = 2
def must_make(src,target):
"returns True if src is newer than target"
try:
src_st = os.stat(src)
src_mt = src_st.st_mtime
except OSError,e:
# self.error("os.stat() failed: ",e)
return False
try:
target_st = os.stat(target)
target_mt = target_st.st_mtime
except OSError,e:
# self.error("os.stat() failed: %s",e)
return True
if src_mt - target_mt > MODIFY_WINDOW:
return True
return False
def make_dummy_messages_file(src_fn,messages):
"""
Write a dummy `.py` source file containing
translatable messages that getmessages will find.
"""
target_fn = src_fn + '.py'
if not must_make(src_fn,target_fn):
logger.debug("%s is up-to-date.", target_fn)
return
try:
f = file(target_fn,'w')
except IOError,e:
logger.warning("Could not write file %s : %s", target_fn, e)
return
f.write("# this file is generated by Lino\n")
f.write("from django.utils.translation import ugettext\n")
for m in messages:
f.write("ugettext(%r)\n" % m)
f.close()
logger.info("Wrote %d dummy messages to %s.", len(messages),target_fn)
|
MaxTyutyunnikov/lino
|
lino/utils/config.py
|
Python
|
gpl-3.0
| 11,277 | 0.018003 |
import unittest
from literoticapi.author import *
class testStory(unittest.TestCase):
def setUp(self):
self.author = Author(868670)
def testGetSeriesAndNonSeries(self):
assert len(self.author.get_stories()) >= 132
if __name__ == "__main__":
unittest.main()
|
hrroon/literoticapi
|
test/author_test.py
|
Python
|
gpl-3.0
| 290 | 0.006897 |
# Copyright (c) 2008 Simplistix Ltd
# See license.txt for license details.
from mock import Mock
from testfixtures import wrap,compare
from unittest import TestCase,TestSuite,makeSuite
class TestWrap(TestCase):
def test_wrapping(self):
m = Mock()
@wrap(m.before,m.after)
def test_function(r):
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (), {}),
('test', (), {}),
('after', (), {})
])
def test_wrapping_only_before(self):
before = Mock()
@wrap(before)
def test_function():
return 'something'
self.assertFalse(before.called)
compare(test_function(),'something')
compare(before.call_count,1)
def test_wrapping_wants_return(self):
m = Mock()
m.before.return_value = 'something'
@wrap(m.before,m.after)
def test_function(r):
m.test(r)
return 'r:'+r
compare(m.method_calls,[])
compare(test_function(),'r:something')
compare(m.method_calls,[
('before', (), {}),
('test', ('something',), {}),
('after', (), {})
])
def test_wrapping_wants_arguments(self):
# This only works in python 2.5+, for
# earlier versions, you'll have to come
# up with your own `partial` class...
from functools import partial
m = Mock()
@wrap(partial(m.before,1,x=2),partial(m.after,3,y=4))
def test_function(r):
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (1,), {'x':2}),
('test', (), {}),
('after', (3,), {'y':4})
])
def test_multiple_wrappers(self):
m = Mock()
@wrap(m.before2,m.after2)
@wrap(m.before1,m.after1)
def test_function():
m.test_function()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (), {}),
('after2', (), {}),
('after1', (), {}),
])
def test_multiple_wrappers_wants_return(self):
m = Mock()
m.before1.return_value=1
m.before2.return_value=2
@wrap(m.before2,m.after2)
@wrap(m.before1,m.after1)
def test_function(r1,r2):
m.test_function(r1,r2)
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (1,2), {}),
('after2', (), {}),
('after1', (), {}),
])
def test_multiple_wrappers_only_want_first_return(self):
m = Mock()
m.before1.return_value=1
@wrap(m.before2,m.after2)
@wrap(m.before1,m.after1)
def test_function(r1):
m.test_function(r1)
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (1,), {}),
('after2', (), {}),
('after1', (), {}),
])
def test_wrap_method(self):
m = Mock()
class T:
@wrap(m.before,m.after)
def method(self):
m.method()
T().method()
compare(m.method_calls,[
('before', (), {}),
('method', (), {}),
('after', (), {})
])
def test_wrap_method_wants_return(self):
m = Mock()
m.before.return_value = 'return'
class T:
@wrap(m.before,m.after)
def method(self,r):
m.method(r)
T().method()
compare(m.method_calls,[
('before', (), {}),
('method', ('return',), {}),
('after', (), {})
])
def test_wrapping_different_functions(self):
m = Mock()
@wrap(m.before1,m.after1)
def test_function1():
m.something1()
return 'something1'
@wrap(m.before2,m.after2)
def test_function2():
m.something2()
return 'something2'
compare(m.method_calls,[])
compare(test_function1(),'something1')
compare(m.method_calls,[
('before1', (), {}),
('something1', (), {}),
('after1', (), {})
])
compare(test_function2(),'something2')
compare(m.method_calls,[
('before1', (), {}),
('something1', (), {}),
('after1', (), {}),
('before2', (), {}),
('something2', (), {}),
('after2', (), {})
])
def test_wrapping_local_vars(self):
m = Mock()
@wrap(m.before,m.after)
def test_function():
something = 1
from testfixtures.tests import sample2
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (), {}),
('test', (), {}),
('after', (), {})
])
def test_wrapping__name__(self):
m = Mock()
@wrap(m.before,m.after)
def test_function():
pass # pragma: no cover
compare(test_function.__name__,'test_function')
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/testfixtures/tests/test_wrap.py
|
Python
|
agpl-3.0
| 5,995 | 0.012677 |
OperandLookupTable = b''.join([
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\xbf\x82\x00\x00\x00\x00\x7d\xfd\x41\xc1\x00\x00\x00\x00'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41'
b'\xc1\xfd\xc1\xc1\x81\xbd\x81\xbd\x81\xbd\x81\xbd\x82\x88\x82\xbd'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7a\x00\x00\x00\x00\x00'
b'\x41\x7d\x41\x7d\x00\x00\x00\x00\x41\x7d\x00\x00\x00\x00\x00\x00'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d'
b'\xc1\xc1\x42\x00\xba\xba\xc1\xfd\x41\x00\x42\x00\x00\x41\x00\x00'
b'\x81\xbd\x81\xbd\x41\x41\x00\x00\x84\x84\x84\x84\x84\x84\x82\x82'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x7d\x7d\x7a\x41\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xc1\xfd\x00\x00\x00\x00\x00\x00\x81\xbd'
b'\x82\x84\x82\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbd\x00\xc1'
b'\x84\x84\x88\x88\x88\x88\x88\x88\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd'
b'\x84\x84\x84\x84\x84\x00\x84\x00\x84\x84\x88\x84\x84\x84\x84\x84'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd'
b'\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x90\x84\x84\x84\x84'
b'\x84\x84\x84\x84\x84\x84\x84\x88\x88\x88\x88\x88\x90\x90\x84\x88'
b'\xc1\xc1\xc1\xc1\x88\x88\x88\x00\x84\x84\x00\x00\x84\x84\x88\x88'
b'\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d'
b'\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81'
b'\x00\x00\x00\xbd\xc1\xbd\x00\x00\x00\x00\x00\xbd\xc1\xbd\x84\xbd'
b'\x81\xbd\xba\xbd\xba\xba\x81\x82\x00\x00\xc1\xbd\xbd\xbd\x81\x82'
b'\x81\xbd\xc1\xbc\xc1\xc1\xc1\x88\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88\x88\x84\x88\x88\x88\x88'
b'\x88\x88\x88\x88\x88\x88\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88'
b'\x90\x88\x88\x88\x88\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88\x00'
])
|
arizvisa/syringe
|
lib/ia32/_optable.py
|
Python
|
bsd-2-clause
| 2,339 | 0 |
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup, UnicodeDammit
import time
import os
import re
import log
import tools
class Get(object):
# timeout, retry_interval -> seconds
def __init__(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath='.'):
self.log = log.Log()
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
self.proxies = proxies
self.headers = headers
if download_file is None:
self.download_file = False
else:
self.download_file = download_file
self.savepath = savepath
self.download_result = None
self.__run()
def config(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath=''):
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
if len(proxies) > 0:
self.proxies = proxies
if not download_file is None:
self.download_file = download_file
if len(headers) > 0:
self.headers = headers
if savepath != '':
self.savepath = savepath
self.__run()
def __run(self):
if self.url != '':
self.s = requests.Session()
self.__get()
if self.download_file:
self.__download()
else:
self.__soup()
def __get(self):
if self.url == '':
return False
self.log.info('start get [%s]'%self.url)
self.r = None
for i in range(self.retry):
try:
self.r = self.s.get(self.url, timeout=self.timeout, proxies=self.proxies, headers=self.headers)
break
except Exception as e:
self.log.error( '[retry %d] get [%s] fail. except [%s]'%(i+1, self.url, str(e)) )
time.sleep(self.retry_interval)
if self.r is None:
self.log.error('get [%s] fail' % self.url)
return False
self.log.info('end, get [%s]' % self.url)
return True
def __soup(self):
if not self.r:
self.log.error('self.r is None, cannot get soup. url [%s]' % self.url)
return False
if self.download_file:
self.log.info('to download url [%s], should not get soup' % self.url)
return False
self.soup = None
try:
self.soup = BeautifulSoup(self.r.content, 'html.parser')
return True
except Exception as e:
            self.log.error('construct BeautifulSoup fail, url [%s], except [%s]' % (self.url, str(e)))
return False
def __download(self):
self.log.info('start download [%s]' % self.url)
if self.r is None:
self.log.error('self.r is None. download fail. url [%s]' % self.url)
return False
filepath = self.savepath
tools.mkdir(filepath)
r = self.r
url = self.url
        # get the content-length from the response headers
tot_size = 0
try:
tot_size = int( r.headers['content-length'] )
except Exception as e:
self.log.error('cannot get content-length, url [%s], headers [%s]' % (url, str(r.headers)) )
# get file name
filename = self.__get_file_name()
chunk_size = 4096
flag = 3
# retry if size is not right.
for i in range(3):
now_size = 0
try:
#print filename, type(filename)
with open( os.path.join(self.savepath, filename), 'wb' ) as f:
for chunk in r.iter_content(chunk_size):
now_size = now_size + len(chunk)
f.write(chunk)
except Exception as e:
self.log.error(u'something wrong. url [%s], exception [%s], 文件名 [%s], retry [%d]' % (url, unicode(e), filename, i+1) )
flag = 3
if tot_size == 0:
self.log.info(u'获取文件size失败,无法校验。 获取的文件大小 [%d], 文件名 [%s], url [%s]' % (now_size, filename, url) )
flag = 0
break
if now_size != tot_size:
self.log.error('文件size不正确. 获取的文件大小 [%d], 正确文件大小 [%d], 文件名 [%s], url[%s], retry[%d]' % (now_size, tot_size, filename.encode('utf8'), url, i+1))
flag = 4
else:
flag = 0
break
time.sleep(1)
self.log.info('end download [%s]' % self.url)
self.download_result = {'errno': flag, 'filename': filename}
return self.download_result
def __get_file_name(self):
        # derive the file suffix from the content-type header
r = self.r
url = self.url
        suf = ''
        # initialise filename so the Content-Disposition branch below can test it
        filename = None
try:
ct = r.headers['content-type']
ctl = ct.split(';')
for i in ctl:
try:
suf = constant.CONTENT_TYPE_REVERSE[i.strip()]
except Exception as e:
pass
except Exception as e:
self.log.error('cannot get suffix, url[%s], headers [%s]' % (url, str(r.headers)))
        # try Content-Disposition; a filename/suffix from this header takes priority
try:
content_disposition = r.headers['Content-Disposition']
fntmp = re.findall(r'filename=[\"\'](.*?)[\"\']', content_disposition)[0]
pos = fntmp.rfind('.')
if pos > -1:
fn = fntmp[:pos]
suf = fntmp[pos:]
else:
fn = fntmp
if filename is None:
filename = fn
dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
filename = dammit.unicode_markup
except Exception as e:
pass
        # filename and suffix derived from the url
pos = url.rfind("/") + 1
if pos >= len(url) or pos == -1:
fn = str(time.time()).replace(".", "")
else:
fn = url[pos:]
pos = fn.rfind('.')
if pos >= len(fn) or pos == -1:
pass
else:
if suf == '':
suf = fn[pos:]
try:
fn = fn[:pos]
except Exception as e:
pass
        # only fall back to the url-derived name when Content-Disposition gave none
        if filename is None:
            filename = fn
            dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
            filename = dammit.unicode_markup
        # check whether a file with the same name already exists and resolve the clash
i = 0
while True:
if i == 0:
if not os.path.exists( os.path.join(self.savepath, filename+suf) ):
break
else:
if not os.path.exists( os.path.join(self.savepath, filename+("(%d)"%i)+suf ) ):
filename = filename + ("(%d)"%i)
break
i = i + 1
filename = filename + suf
        # make sure the filename is legal (windows)
filename = tools.replaceBanCharacter(filename)
return filename
def download(self, url, savepath=''):
self.url = url
self.download_file = True
if savepath != '':
self.savepath = savepath
return self.__download()
def get(self):
return self.r
def soup(self):
return self.soup
def getsoup(self):
return (self.r, self.soup)
def clear_headers(self):
self.headers = {}
def clear_proxies(self):
self.proxies = {}
def stop(self):
self.log.stop()
def __del__(self):
self.stop()
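# Illustrative usage sketch (added for clarity, not part of the original
# module). The class does its work in the constructor, so fetching a page and
# downloading a file look roughly like this; URLs and the save path are made-up.
#
#     g = Get('http://example.com/index.html', timeout=10, retry=3)
#     response, soup = g.getsoup()   # requests.Response and BeautifulSoup tree
#     g.stop()
#
#     d = Get('http://example.com/file.zip', download_file=True, savepath='./downloads')
#     print(d.download_result)       # e.g. {'errno': 0, 'filename': u'file.zip'}
#     d.stop()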
|
BD777/WindPythonToy
|
comm/network.py
|
Python
|
apache-2.0
| 7,827 | 0.004999 |
from __future__ import absolute_import, division, unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from debug_toolbar.panels import Panel
from debug_toolbar import settings as dt_settings
import cProfile
from pstats import Stats
from colorsys import hsv_to_rgb
import os
class DjangoDebugToolbarStats(Stats):
__root = None
def get_root_func(self):
if self.__root is None:
for func, (cc, nc, tt, ct, callers) in self.stats.items():
if len(callers) == 0:
self.__root = func
break
return self.__root
class FunctionCall(object):
def __init__(self, statobj, func, depth=0, stats=None,
id=0, parent_ids=[], hsv=(0, 0.5, 1)):
self.statobj = statobj
self.func = func
if stats:
self.stats = stats
else:
self.stats = statobj.stats[func][:4]
self.depth = depth
self.id = id
self.parent_ids = parent_ids
self.hsv = hsv
def parent_classes(self):
return self.parent_classes
def background(self):
r, g, b = hsv_to_rgb(*self.hsv)
return 'rgb(%f%%,%f%%,%f%%)' % (r * 100, g * 100, b * 100)
def func_std_string(self): # match what old profile produced
func_name = self.func
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
file_name, line_num, method = self.func
idx = file_name.find('/site-packages/')
if idx > -1:
file_name = file_name[(idx + 14):]
file_path, file_name = file_name.rsplit(os.sep, 1)
return mark_safe(
'<span class="path">{0}/</span>'
'<span class="file">{1}</span>'
' in <span class="func">{3}</span>'
'(<span class="lineno">{2}</span>)'.format(
file_path,
file_name,
line_num,
method))
def subfuncs(self):
i = 0
h, s, v = self.hsv
count = len(self.statobj.all_callees[self.func])
for func, stats in self.statobj.all_callees[self.func].items():
i += 1
h1 = h + (i / count) / (self.depth + 1)
if stats[3] == 0:
s1 = 0
else:
s1 = s * (stats[3] / self.stats[3])
yield FunctionCall(self.statobj,
func,
self.depth + 1,
stats=stats,
id=str(self.id) + '_' + str(i),
parent_ids=self.parent_ids + [self.id],
hsv=(h1, s1, 1))
def count(self):
return self.stats[1]
def tottime(self):
return self.stats[2]
def cumtime(self):
cc, nc, tt, ct = self.stats
return self.stats[3]
def tottime_per_call(self):
cc, nc, tt, ct = self.stats
if nc == 0:
return 0
return tt / nc
def cumtime_per_call(self):
cc, nc, tt, ct = self.stats
if cc == 0:
return 0
return ct / cc
def indent(self):
return 16 * self.depth
class ProfilingPanel(Panel):
"""
Panel that displays profiling information.
"""
title = _("Profiling")
template = 'debug_toolbar/panels/profiling.html'
def process_view(self, request, view_func, view_args, view_kwargs):
self.profiler = cProfile.Profile()
args = (request,) + view_args
return self.profiler.runcall(view_func, *args, **view_kwargs)
def add_node(self, func_list, func, max_depth, cum_time=0.1):
func_list.append(func)
func.has_subfuncs = False
if func.depth < max_depth:
for subfunc in func.subfuncs():
if subfunc.stats[3] >= cum_time:
func.has_subfuncs = True
self.add_node(func_list, subfunc, max_depth, cum_time=cum_time)
def process_response(self, request, response):
if not hasattr(self, 'profiler'):
return None
# Could be delayed until the panel content is requested (perf. optim.)
self.profiler.create_stats()
self.stats = DjangoDebugToolbarStats(self.profiler)
self.stats.calc_callees()
root = FunctionCall(self.stats, self.stats.get_root_func(), depth=0)
func_list = []
self.add_node(func_list,
root,
dt_settings.CONFIG['PROFILER_MAX_DEPTH'],
root.stats[3] / 8)
self.record_stats({'func_list': func_list})
|
nirmeshk/oh-mainline
|
vendor/packages/django-debug-toolbar/debug_toolbar/panels/profiling.py
|
Python
|
agpl-3.0
| 4,968 | 0.000201 |
"""
For types associated with installation schemes.
For a general overview of available schemes and their context, see
https://docs.python.org/3/install/index.html#alternate-installation.
"""
SCHEME_KEYS = ['platlib', 'purelib', 'headers', 'scripts', 'data']
class Scheme:
"""A Scheme holds paths which are used as the base directories for
artifacts associated with a Python package.
"""
__slots__ = SCHEME_KEYS
def __init__(
self,
platlib, # type: str
purelib, # type: str
headers, # type: str
scripts, # type: str
data, # type: str
):
self.platlib = platlib
self.purelib = purelib
self.headers = headers
self.scripts = scripts
self.data = data
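# Minimal usage sketch (added for illustration; the paths are made-up, not
# taken from any real sysconfig scheme):
#
#     scheme = Scheme(
#         platlib="/usr/lib/python3.9/site-packages",
#         purelib="/usr/lib/python3.9/site-packages",
#         headers="/usr/include/python3.9/somepkg",
#         scripts="/usr/bin",
#         data="/usr",
#     )
#     # installer code then joins artifact names onto these base directories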
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/pip/_internal/models/scheme.py
|
Python
|
apache-2.0
| 770 | 0 |
# David Tsui 2.9.2016
# Human Languages and Technologies
# Dr. Rebecca Hwa
from Ngrams import *
import string
#TRAIN
train_file = open("tokenized_train.txt","r")
train_str = train_file.read();
tri = Trigram(1)
print "Begin training vocabulary----------------------"
tri.trainVocabulary(train_str)
#tri.printVocabulary()
#Takes in questions for development
dev_file = open("Holmes.lm_format.questions.txt")
output_file = open("holmes_output.txt","w+")
print "Begin calculating perplexity----------------------"
for i, line in enumerate(dev_file):
    #Clean text by removing all punctuation
line = line[:-1]
exclude = set(string.punctuation)
s = ''.join(ch for ch in line if ch not in exclude)
s = s.lower()
#Lambda factors
lu = .3
lb = .3
lt = .4
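    # Interpolation weights for the unigram/bigram/trigram estimates; they sum
    # to 1, so the smoothed probability is presumably
    #     P(w3 | w1, w2) = lu*P(w3) + lb*P(w3 | w2) + lt*P(w3 | w1, w2)
    # (the exact formula lives in Trigram.getPerWordPerplexityInterpolated).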
print "Question %d complete" %(i)
perplexity = tri.getPerWordPerplexityInterpolated(s,lu,lb,lt)
newline = "%s\t%f\n"%(line,perplexity)
output_file.write(newline)
|
luckyleap/NLP_Projects
|
MSChallenge Ngrams/MSChallengeNGrams.py
|
Python
|
mit
| 908 | 0.030837 |
# -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.conf import settings
from django.utils.encoding import force_str
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.apps import apps ; get_models = apps.get_models
from lino.api import dd, rt
from lino.core import actions
from lino.core.utils import full_model_name
from lino.core.roles import SiteStaff
from lino.modlib.printing.choicelists import BuildMethods
from lino.modlib.checkdata.choicelists import Checker
# import them here to have them on rt.models.system:
from .choicelists import YesNo, Genders, PeriodEvents
from .mixins import Lockable
class BuildSiteCache(dd.Action):
label = _("Rebuild site cache")
url_action_name = "buildjs"
def run_from_ui(self, ar):
settings.SITE.kernel.default_renderer.build_site_cache(True)
return ar.success(
"""\
Seems that it worked. Refresh your browser.
<br>
Note that other users might experience side effects because
of the unexpected .js update, but there are no known problems so far.
Please report any anomalies.""",
alert=_("Success"))
class SiteConfigManager(models.Manager):
def get(self, *args, **kwargs):
return settings.SITE.site_config
class SiteConfig(dd.Model):
class Meta(object):
abstract = dd.is_abstract_model(__name__, 'SiteConfig')
verbose_name = _("Site configuration")
objects = SiteConfigManager()
real_objects = models.Manager()
default_build_method = BuildMethods.field(
verbose_name=_("Default build method"),
blank=True, null=True)
simulate_today = models.DateField(
_("Simulated date"), blank=True, null=True)
site_company = dd.ForeignKey(
"contacts.Company",
blank=True, null=True,
verbose_name=_("Site owner"),
related_name='site_company_sites')
def __str__(self):
return force_str(_("Site Parameters"))
def update(self, **kw):
"""
Set some field of the SiteConfig object and store it to the
database.
"""
# print("20180502 update({})".format(kw))
for k, v in kw.items():
if not hasattr(self, k):
raise Exception("SiteConfig has no attribute %r" % k)
setattr(self, k, v)
self.full_clean()
self.save()
def save(self, *args, **kw):
# print("20180502 save() {}".format(dd.obj2str(self, True)))
super(SiteConfig, self).save(*args, **kw)
settings.SITE.clear_site_config()
def my_handler(sender, **kw):
# print("20180502 {} my_handler calls clear_site_config()".format(
# settings.SITE))
settings.SITE.clear_site_config()
#~ kw.update(sender=sender)
# dd.database_connected.send(sender)
#~ dd.database_connected.send(sender,**kw)
from django.test.signals import setting_changed
from lino.core.signals import testcase_setup
setting_changed.connect(my_handler)
testcase_setup.connect(my_handler)
dd.connection_created.connect(my_handler)
models.signals.post_migrate.connect(my_handler)
class SiteConfigs(dd.Table):
model = 'system.SiteConfig'
required_roles = dd.login_required(SiteStaff)
# default_action = actions.ShowDetail()
#~ has_navigator = False
hide_navigator = True
allow_delete = False
# hide_top_toolbar = True
#~ can_delete = perms.never
detail_layout = dd.DetailLayout("""
default_build_method
# lino.ModelsBySite
""", window_size=(60, 'auto'))
@classmethod
def get_default_action(cls):
return cls.detail_action
do_build = BuildSiteCache()
# if settings.SITE.user_model == 'users.User':
# dd.inject_field(settings.SITE.user_model,
# 'user_type', UserTypes.field())
# dd.inject_field(settings.SITE.user_model, 'language', dd.LanguageField())
class BleachChecker(Checker):
verbose_name = _("Find unbleached html content")
model = dd.Model
def get_checkable_models(self):
for m in super(BleachChecker, self).get_checkable_models():
if len(m._bleached_fields):
yield m
def get_checkdata_problems(self, obj, fix=False):
t = tuple(obj.fields_to_bleach())
if len(t):
fldnames = ', '.join([f.name for f, old, new in t])
yield (True, _("Fields {} have unbleached content.").format(fldnames))
if fix:
obj.before_ui_save(None, None)
obj.full_clean()
obj.save()
BleachChecker.activate()
|
lino-framework/lino
|
lino/modlib/system/models.py
|
Python
|
bsd-2-clause
| 4,683 | 0.002776 |
#
# Author: Martin Sandve Alnes
# Date: 2008-10-03
#
from ufl import (Coefficient, TestFunction, TrialFunction, VectorElement, dot,
dx, grad, triangle)
element = VectorElement("Lagrange", triangle, 1)
u = TrialFunction(element)
v = TestFunction(element)
w = Coefficient(element)
a = dot(dot(w, grad(u)), v) * dx
|
FEniCS/ufl
|
demo/ExplicitConvection.py
|
Python
|
lgpl-3.0
| 332 | 0 |
from __future__ import unicode_literals
import logging
import os
import tornado.ioloop
import tornado.web
from .webhandler.DownloadHandler import DownloadHandler
from .webhandler.OverviewHandler import OverviewHandler
from .webhandler.GradingPreviewMailsHandler import GradingPreviewMailsHandler
from .webhandler.GradingSendMailsHandler import GradingSendMailsHandler
from .webhandler.SheetCreateHandler import SheetCreateHandler
from .webhandler.SheetDeleteHandler import SheetDeleteHandler
from .webhandler.SheetEditEndHandler import SheetEditEndHandler
from .webhandler.SheetHandler import SheetHandler
from .webhandler.SheetRestoreHandler import SheetRestoreHandler
from .webhandler.SheetsHandler import SheetsHandler
from .webhandler.StudentHandler import StudentHandler
from .webhandler.StudentsHandler import StudentsHandler
from .webhandler.SubmissionAssignHandler import SubmissionAssignHandler
from .webhandler.SubmissionDetailHandler import SubmissionDetailHandler
from .webhandler.SubmissionGradeAllHandler import SubmissionGradeAllHandler
from .webhandler.SubmissionsListAllHandler import SubmissionsListAllHandler
from .webhandler.SubmissionsListCurrentHandler import SubmissionsListCurrentHandler
from .webhandler.SubmissionsListUnfinishedHandler import SubmissionsListUnfinishedHandler
from .webhandler.SubmissionStudentSheetHandler import SubmissionStudentSheetHandler
from .webhandler.TaskCreateHandler import TaskCreateHandler
from .webhandler.TaskDeleteHandler import TaskDeleteHandler
from .webhandler.TaskEditHandler import TaskEditHandler
from .webhandler.UpdateDatabaseHandler import UpdateDatabaseHandler
from .webhandler.contact import (
ContactCraftHandler,
ContactSendHandler,
ContactAllCraftHandler,
ContactAllSendHandler,
)
from .webhandler.merge import (
MergeSelectHandler,
MergePreviewHandler,
MergeHandler,
)
from . import database
class KorrekturApp(tornado.web.Application):
realm = 'netsec Uebungsabgabesystem'
def __init__(self, config, handlers):
super(KorrekturApp, self).__init__(handlers)
for handler in handlers:
handler[1].config = config
self.config = config
@property
def users(self):
return self.config('korrektoren')
def web_main(config):
try:
mainloop(config)
except BaseException as e:
logging.exception(e)
raise
def mainloop(config):
application = KorrekturApp(config, [
(r"/", OverviewHandler),
(r"/sheets", SheetsHandler),
(r"/sheet/create", SheetCreateHandler),
(r"/sheet/([0-9]+)/delete", SheetDeleteHandler),
(r"/sheet/([0-9]+)/editend", SheetEditEndHandler),
(r"/sheet/([0-9]+)/restore", SheetRestoreHandler),
(r"/sheet/([0-9]+)/task/create", TaskCreateHandler),
(r"/task/([0-9]+)/edit", TaskEditHandler),
(r"/task/([0-9]+)/delete", TaskDeleteHandler),
(r"/sheet/.*", SheetHandler),
(r"/students", StudentsHandler),
(r"/student/(.*)", StudentHandler),
(r"/submissions", SubmissionsListCurrentHandler),
(r"/submissions/all", SubmissionsListAllHandler),
(r"/submissions/unfinished", SubmissionsListUnfinishedHandler),
(r"/submission/([0-9]+)", SubmissionDetailHandler),
(r"/submission/([0-9]+)/([0-9]+)", SubmissionStudentSheetHandler),
(r"/submission/([0-9]+)/grade_all", SubmissionGradeAllHandler),
(r"/submission/([0-9]+)/assign", SubmissionAssignHandler),
(r"/grading/mails/preview", GradingPreviewMailsHandler),
(r"/grading/mails/send_all", GradingSendMailsHandler),
(r"/merge/([0-9]+)/select", MergeSelectHandler),
(r"/merge/([0-9]+)/preview", MergePreviewHandler),
(r"/merge/([0-9]+)/merge", MergeHandler),
(r"/contact/([0-9]+)", ContactCraftHandler),
(r"/contact/([0-9]+)/send", ContactSendHandler),
(r"/contact/all", ContactAllCraftHandler),
(r"/contact/all/send", ContactAllSendHandler),
(r"/download/(.*)", DownloadHandler),
(r"/updb", UpdateDatabaseHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler, {
"path": os.path.join(config.module_path, "static")
}),
])
application.db = database.Database(config)
port = config('httpd.port')
application.listen(port)
logging.debug("Web server started on port %i.", port)
tornado.ioloop.IOLoop.instance().start()
|
hhucn/netsec-uebungssystem
|
netsecus/korrekturserver.py
|
Python
|
mit
| 4,455 | 0.000673 |
#!/usr/local/bin/python3.5
"""
Author: Maximilian Golub
Simulate the chain parsing and replying to a http request to test
the ESP8266
"""
import serial
import socket
import re
import traceback
import sys
import subprocess
import time
PORT = '/dev/cu.usbserial-FTZ29WSV' #OSX
#PORT = 'COM3' # If on windows
BAUD = 115200
def loop():
"""
Loop that runs forever, reading from the
serial connection waiting for special sequences indicating
TCP data.
:return:
"""
with serial.Serial(PORT, BAUD) as serial_socket:
pound_count = 0
data = ''
start = 0
while True:
#Read a character at a time.
#Yes this is awful and terrible
new_data = serial_socket.read(1)
try:
decode_data = new_data.decode('ascii')
#print(decode_data, end="")
if decode_data:
if decode_data == '\a':
pound_count += 1
if pound_count >= 3:
start = 1
print(pound_count)
else:
pound_count = 0
if decode_data == '\b':
print("***Parsing data!!!****")
start = 0
print(data)
parse(data, serial_socket)
pound_count = 0
data = ''
else:
if start:
data += decode_data
except UnicodeDecodeError:
pass
def parse(data, serial_socket):
"""
Parse the data coming over the serial connection. The data should
be the GET/POST whatever request from the Wifi device attached to the
    ESP8266. Looks for the Host header, tries to get the host+port with regex.
:param data:
:param serial_socket:
:return:
"""
try:
host_match = re.search('Host: (\S+)\\r\\n', data)
if host_match:
host = host_match.group(1)
#print(host)
try:
host, port = host.split()
except ValueError:
port = 80
if host == "192.168.1.1:8080": # Special case to test basic functionality level.
with open('hackaday.txt', 'r') as d:
data = d.read(100)
serial_socket.write('\b\b\b'.encode('utf-8'))
while data:
serial_socket.write(data.encode('utf-8'))
data = d.read(100)
if chr(27).encode() in data.encode():
print("OH SHIT")
time.sleep(.01)
serial_socket.write(chr(27).encode())
else: #Connect a socket as a client, then return that over the uart.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((host, int(port)))
totalsent = 0
while totalsent < len(data): #Send all of our data
sent = s.send(data[totalsent:].encode())
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
result = s.recv(100) #Recieve data in 100 byte chunks, just like the special case.
if result:
serial_socket.write('\b\b\b'.encode('utf-8')) #Write special start code sequence
while (len(result) > 0):
serial_socket.write(result)
time.sleep(.01) #Keep the ESP8266 from sploding
result = s.recv(100)
serial_socket.write(chr(27).encode())
s.close()
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
loop()
|
mgolub2/Breadcrumb
|
ESP8266/chain_simulator.py
|
Python
|
gpl-3.0
| 4,013 | 0.005981 |
from django.core.management.base import BaseCommand
from webserver.codemanagement.models import TeamClient
import os
import re
import tempfile
import subprocess
class Command(BaseCommand):
help = 'Attempts to update all repositories by pulling from bases'
def handle(self, *args, **options):
# A list of tuples: (message, repo_directory, stderr)
errors = []
# A list of tuples: (team name, git-show output)
successes = []
for client in TeamClient.objects.all():
directory = tempfile.mkdtemp(prefix='GRETA_UPDATE')
repo_name = os.path.basename(client.repository.name)
repo_name = repo_name.replace(".git", "")
repo_directory = os.path.join(directory, repo_name)
self.stdout.write("Updating {0}'s repo...\n".format(client.team.name))
####################
# Clone
####################
self.stdout.write("\tCloning...\n")
clone = subprocess.Popen(["git", "clone", client.repository.path],
cwd=directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = clone.communicate()
if clone.returncode != 0:
errors.append(
("Failed to clone {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Pull
####################
self.stdout.write("\tPulling...\n")
# Use default merge-recursive strategy
pull = subprocess.Popen(["git", "pull",
client.base.repository.path],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = pull.communicate()
if pull.returncode != 0:
errors.append(
("Failed to pull into {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Push
####################
self.stdout.write("\tPushing...\n")
push = subprocess.Popen(["git", "push"],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = push.communicate()
if push.returncode != 0:
errors.append(
("Failed to push to {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Show
####################
self.stdout.write("\tGetting show...\n")
show = subprocess.Popen(["git", "show", "--stat"],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = show.communicate()
successes.append((client.team.name, out + err))
if successes:
self.stdout.write("\n\nSuccessfully updated some team repos\n")
for name, show in successes:
self.stdout.write("\t - {0}\n".format(name))
for line in show.splitlines():
self.stdout.write("\t\t" + line + "\n")
self.stdout.write("\n\n")
if errors:
self.stdout.write("\n\nUnable to update some team repos\n")
for name, directory, stderr in errors:
self.stdout.write("\t - {0} ({1})\n".format(name, directory))
for line in stderr.splitlines():
self.stdout.write("\t\t" + line + "\n")
self.stdout.write("\n\n")
if not errors and not successes:
self.stdout.write("No team repos to update\n")
|
siggame/webserver
|
webserver/codemanagement/management/commands/update_repos.py
|
Python
|
bsd-3-clause
| 4,248 | 0.000235 |
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.files.checkedOutFile import CheckedOutFile
class CheckedOutFileCollection(BaseEntityCollection):
def __init__(self, context, resource_path=None):
super(CheckedOutFileCollection, self).__init__(context, CheckedOutFile, resource_path)
|
vgrem/Office365-REST-Python-Client
|
office365/sharepoint/files/checkedOutFileCollection.py
|
Python
|
mit
| 350 | 0.002857 |
import csv
from meerkat_abacus.config import config
def data_types(param_config=config):
with open(param_config.config_directory + param_config.country_config["types_file"],
"r", encoding='utf-8',
errors="replace") as f:
DATA_TYPES_DICT = [_dict for _dict in csv.DictReader(f)]
return DATA_TYPES_DICT
def data_types_for_form_name(form_name, param_config=config):
return [data_type for data_type in data_types(param_config=param_config) if form_name == data_type['form']]
DATA_TYPES_DICT = data_types()
|
who-emro/meerkat_abacus
|
meerkat_abacus/util/data_types.py
|
Python
|
mit
| 555 | 0.009009 |
import logging
from time import strftime
def closed():
logging.info('Headlights process stopped')
def criterr(errortext):
logging.critical('A fatal error occured :: ' + errortext)
exit()
def err(errortext):
logging.error('An error occured :: ' + errortext)
def warn(errortext):
logging.warning(errortext)
def inf(errortext):
logging.info(errortext)
def debug(errortext):
logging.debug(errortext)
|
mashedkeyboard/Headlights
|
handlers.py
|
Python
|
gpl-3.0
| 429 | 0.016317 |
"""
File: <Sin2_plus_cos2>
Copyright (c) 2016 <Lauren Graziani>
License: MIT
<debugging a program>
"""
"""
# a
from math import sin, cos #need to import pi from math
x = pi/4
1_val = math.sin^2(x) + math.cos^2(x) #can't start a variable with a number, powers are written by **
print 1_VAL
"""
# a debugged
from math import sin, cos, pi
x = pi / 4
val1 = sin(x) ** 2 + cos(x) ** 2
print val1
"""
# b
v0 = 3 m/s #get rid of m/s
t = 1 s #get rid of s
a = 2 m/s**2 # **2 should come right after 2, get rid of m/s
s = v0.t + 0,5.a.t**2 #v0.t should be v0*2, change comma to period and periods to *
print s
"""
# b debugged
v0 = 3
t = 1
a = 2 ** 2
s = v0*t + 0.5*a*t**2
print s
#c
"""
a = 3,3 b = 5,3
a2 = a**2
b2 = b**2
eq1_sum = a2 + 2ab + b2
eq2_sum = a2 - (2ab + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print 'First equation: %g = %g', % (eq1_sum, eq1_pow)
print 'Second equation: %h = %h', % (eq2_pow, eq2_pow)
# c debugged (confused???)
a = 3,3
b=5,3
a2 = a**2
b2 = b**2
eq1_sum = a2 + (2*a*b) + b2
eq2_sum = a2 - (2*a*b) + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print "First equation: %g = %g" % (eq1_sum, eq1_pow)
print "Second equation: %h = %h" % (eq2_pow, eq2_pow)
"""
|
chapman-cpsc-230/hw1-grazi102
|
sin2_plus_cos2.py
|
Python
|
mit
| 1,198 | 0.004174 |
class A(object):
def __init__(self, bar):
self._x = 1 ; self._bar = bar
def __getX(self):
return self._x
def __setX(self, x):
self._x = x
def __delX(self):
pass
x1 = property(__getX, __setX, __delX, "doc of x1")
x2 = property(__setX) # should return
x3 = property(__getX, __getX) # should not return
x4 = property(__getX, fdel=__getX) # should not return
x5 = property(__getX, doc=123) # bad doc
x6 = property(lambda self: self._x)
x7 = property(lambda self: self._x, lambda self: self._x) # setter should not return
@property
def foo(self):
return self._x
@foo.setter # ignored in 2.5
def foo(self, x):
self._x = x
@foo.deleter # ignored in 2.5
def foo(self):
pass
@property
def boo(self):
return self._x
@boo.setter
def boo1(self, x): # ignored in 2.5
self._x = x
@boo.deleter
def boo2(self): # ignored in 2,5
pass
@property
def moo(self): # should return
pass
@moo.setter
def foo(self, x):
return 1 # ignored in 2.5
@foo.deleter
def foo(self):
return self._x # ignored in 2.5
@qoo.setter # unknown qoo is reported in ref inspection
def qoo(self, v):
self._x = v
@property
def bar(self):
return None
class Ghostbusters(object):
def __call__(self):
return "Who do you call?"
gb = Ghostbusters()
class B(object):
x = property(gb) # pass
y = property(Ghostbusters()) # pass
z = property(Ghostbusters) # pass
class Eternal(object):
def give(self):
while True:
yield 1
def giveAndTake(self):
x = 1
while True:
x = (yield x)
one = property(give) # should pass
anything = property(giveAndTake) # should pass
|
caot/intellij-community
|
python/testData/inspections/PyPropertyDefinitionInspection25/src/prop_test.py
|
Python
|
apache-2.0
| 1,700 | 0.042941 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-25 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('venue', '0005_auto_20170916_0701'),
]
operations = [
migrations.CreateModel(
name='EventCalander',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Default Event', max_length=200)),
('calander_id', models.TextField()),
('active', models.BooleanField(default=True)),
],
),
]
|
tornadoalert/kmcoffice
|
venue/migrations/0006_eventcalander.py
|
Python
|
gpl-3.0
| 725 | 0.002759 |
"""Given (x,wave,matrices, slit_profile), extract the flux from each order. For
readability, we keep this separate from the simulator.... but the simulator is
required in order to run this.
To run, create a simulated fits file (e.g. "test_blue.fits") using ghostsim then:
blue_high = pyghost.extract.Extractor('blue', 'high')
flux,var = blue_high.two_d_extract("test_blue.fits")
plt.plot(blue_high.w_map[0,:], flux[0,:,0])
"""
from __future__ import division, print_function
import ghostsim
import numpy as np
import matplotlib.pyplot as plt
try:
import pyfits
except:
import astropy.io.fits as pyfits
import pdb
class Extractor():
"""A class for each arm of the spectrograph. The initialisation function takes a
single string representing the configuration. For GHOST, it can be "red" or "blue".
The extraction is defined by 3 key parameters: an "x_map", which is equivalent to
2dFDR's tramlines and contains a physical x-coordinate for every y (dispersion direction)
coordinate and order, and a "w_map", which is the wavelength corresponding to every y
(dispersion direction) coordinate and order. """
def __init__(self,arm, mode):
self.sim = ghostsim.Arm(arm)
self.x_map,self.w_map,self.blaze,self.matrices = self.sim.spectral_format_with_matrix()
#Fill in the slit dimensions in "simulator pixel"s. based on if we are in the
#high or standard resolution mode.
if mode == 'high':
self.mode = mode
self.lenslet_width = self.sim.lenslet_high_size
self.nl = 28
## Set default profiles - object, sky and reference
fluxes = np.zeros( (self.nl,3) )
fluxes[2:21,0] = 0.37
fluxes[8:15,0] = 0.78
fluxes[11,0] = 1.0
#NB if on the following line, fluxes[2:,1]=1.0 is set, sky will be
#subtracted automatically.
fluxes[2+19:,1]=1.0
fluxes[0,2]=1.0
self.define_profile(fluxes)
elif mode == 'std':
self.mode = mode
self.lenslet_width = self.sim.lenslet_std_size
self.nl = 17
## Set default profiles - object 1, sky and object 2
fluxes = np.zeros( (self.nl,3) )
fluxes[0:7,0] = 1.0
fluxes[7:10,1] = 1.0
fluxes[10:,2] = 1.0
self.define_profile(fluxes)
#Set some default pixel offsets for each lenslet, as used for a square lenslet profile
ny = self.x_map.shape[1]
nm = self.x_map.shape[0]
pix_offset_ix = np.append(np.append([0],np.arange(1,self.nl).repeat(2)),self.nl)
self.square_offsets = np.empty( (2*self.nl,nm) )
# The [0,0] component of "matrices" measures the size of a detector pixel in the
# simulated slit image space. i.e. slitmicrons/detpix.
for i in range(self.nl):
self.square_offsets[:,i] = (pix_offset_ix - self.nl/2.0) * self.lenslet_width / self.matrices[i,self.x_map.shape[1]//2,0,0]
self.sim_offsets = np.empty( (self.sim.im_slit_sz,nm) )
im_slit_pix_in_microns = (np.arange(self.sim.im_slit_sz) - self.sim.im_slit_sz/2.0) * self.sim.microns_pix
for i in range(nm):
self.sim_offsets[:,i] = im_slit_pix_in_microns / self.matrices[i,self.x_map.shape[1]//2,0,0]
#To aid in 2D extraction, let's explicitly compute the y offsets corresponding to these x offsets...
#The "matrices" map pixels back to slit co-ordinates.
self.slit_tilt = np.zeros( (nm,ny) )
for i in range(nm):
for j in range(ny):
invmat = np.linalg.inv( self.matrices[i,j] )
#What happens to the +x direction?
x_dir_map = np.dot(invmat,[1,0])
self.slit_tilt[i,j] = x_dir_map[1]/x_dir_map[0]
def define_profile(self,fluxes):
""" Manually define the slit profile as used in lenslet extraction. As this is
a low-level function, all lenslets must be defined. e.g. by convention, for the
star lenslets of the high resolution mode, lenslets 0,1 and 21 through 27 would
be zero. Also """
if fluxes.shape[0] != self.nl:
print("Error: {0:s} resolution mode must have {1:d} lenslets".format(self.mode,self.nl))
else:
self.square_profile = np.empty( (fluxes.shape[0]*2, fluxes.shape[1]) )
self.sim_profile = np.empty( (self.sim.im_slit_sz, fluxes.shape[1]) )
for i in range(fluxes.shape[1]):
self.square_profile[:,i] = np.array(fluxes[:,i]).repeat(2)
im_slit=self.sim.make_lenslets(fluxes=fluxes[:,i], mode=self.mode)
self.sim_profile[:,i] = np.sum(im_slit, axis=0)
def one_d_extract(self, data=[], file='', badpix=[], lenslet_profile='sim', rnoise=3.0):
""" Extract flux by integrating down columns (the "y" direction), using an
optimal extraction method.
Given that some of this code is in common with two_d_extract, the routines could
easily be merged... however that would make one_d_extract less readable.
Parameters
----------
data: numpy array (optional)
Image data, transposed so that dispersion is in the "y" direction. Note that
this is the transpose of a conventional echellogram. Either data or file
must be given
file: string (optional)
A fits file with conventional row/column directions containing the data to be
extracted.
lenslet_profile: 'square' or 'sim'
Shape of the profile of each fiber as used in the extraction. For a final
implementation, 'measured' should be a possibility. 'square' assigns each
pixel uniquely to a single lenslet. For testing only
rnoise: float
The assumed readout noise.
WARNING: Binning not implemented yet"""
if len(data)==0:
if len(file)==0:
print("ERROR: Must input data or file")
else:
#Transpose the data from the start.
data = pyfits.getdata(file).T
ny = self.x_map.shape[1]
nm = self.x_map.shape[0]
nx = self.sim.szx
#Number of "objects"
no = self.square_profile.shape[1]
extracted_flux = np.zeros( (nm,ny,no) )
extracted_var = np.zeros( (nm,ny,no) )
#Assuming that the data are in photo-electrons, construct a simple model for the
#pixel inverse variance.
pixel_inv_var = 1.0/(np.maximum(data,0) + rnoise**2)
pixel_inv_var[badpix]=0.0
#Loop through all orders then through all y pixels.
for i in range(nm):
print("Extracting order: {0:d}".format(i))
#Based on the profile we're using, create the local offsets and profile vectors
if lenslet_profile == 'square':
offsets = self.square_offsets[:,i]
profile = self.square_profile
elif lenslet_profile == 'sim':
offsets = self.sim_offsets[:,i]
profile = self.sim_profile
nx_cutout = 2*int( (np.max(offsets) - np.min(offsets))/2 ) + 2
phi = np.empty( (nx_cutout,no) )
for j in range(ny):
#Check for NaNs
if self.x_map[i,j] != self.x_map[i,j]:
extracted_var[i,j,:] = np.nan
continue
#Create our column cutout for the data and the PSF
x_ix = int(self.x_map[i,j]) - nx_cutout//2 + np.arange(nx_cutout,dtype=int) + nx//2
for k in range(no):
phi[:,k] = np.interp(x_ix - self.x_map[i,j] - nx//2, offsets, profile[:,k])
phi[:,k] /= np.sum(phi[:,k])
#Deal with edge effects...
ww = np.where( (x_ix >= nx) | (x_ix < 0) )[0]
x_ix[ww]=0
phi[ww,:]=0.0
#Cut out our data and inverse variance.
col_data = data[j,x_ix]
col_inv_var = pixel_inv_var[j,x_ix]
#Fill in the "c" matrix and "b" vector from Sharp and Birchall equation 9
#Simplify things by writing the sum in the computation of "b" as a matrix
#multiplication. We can do this because we're content to invert the
#(small) matrix "c" here. Equation 17 from Sharp and Birchall
#doesn't make a lot of sense... so lets just calculate the variance in the
#simple explicit way.
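                #As implemented below (compact restatement of those equations):
                #    B[j,k]  = phi[j,k] / var[j]
                #    C[k,l]  = sum_j phi[j,k]*phi[j,l] / var[j]
                #    W       = B . C^{-1}         (pixel weights, one column per object)
                #    flux[k] = sum_j data[j]*W[j,k]
                #    var[k]  = sum_j var[j]*W[j,k]**2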
col_inv_var_mat = np.reshape(col_inv_var.repeat(no), (nx_cutout,no) )
b_mat = phi * col_inv_var_mat
c_mat = np.dot(phi.T,phi*col_inv_var_mat)
pixel_weights = np.dot(b_mat,np.linalg.inv(c_mat))
extracted_flux[i,j,:] = np.dot(col_data,pixel_weights)
extracted_var[i,j,:] = np.dot(1.0/np.maximum(col_inv_var,1e-12),pixel_weights**2)
return extracted_flux, extracted_var
def two_d_extract(self, file='', data=[], badpix=[], lenslet_profile='sim', rnoise=3.0, deconvolve=True):
""" Extract using 2D information. The lenslet model used is a collapsed profile,
in 1D but where we take into account the slit shear/rotation by interpolating this
1D slit profile to the nearest two pixels along each row (y-axis in code).
One key difference to Sharp and Birchall is that c_kj (between equations 8 and 9)
is the correct normalisation for a (fictitious) 1-pixel wide PSF centered exactly
on a pixel, but not for a continuum. We normalise correctly for a continuum by
having one of the \phi functions being one-pixel wide along the slit, and the
other being unbounded in the dispersion direction.
Note that the input data has to be the transpose of a conventional echellogram
TODO:
1) Neaten the approximate matrix inverse square root
Parameters
----------
data: numpy array (optional)
Image data, transposed so that dispersion is in the "y" direction. Note that
this is the transpose of a conventional echellogram. Either data or file
must be given
file: string (optional)
A fits file with conventional row/column directions containing the data to be
extracted.
lenslet_profile: 'square' or 'sim'
Shape of the profile of each fiber as used in the extraction. For a final
implementation, 'measured' should be a possibility. 'square' assigns each
pixel uniquely to a single lenslet. For testing only
rnoise: float
The assumed readout noise.
deconvolve: bool
Do we deconvolve so that neighboring extracted spectral points
are statistically independent? This is an approximate deconvolution (a linear
function of 5 neighboring pixels) so is reasonably robust. """
if len(data)==0:
if len(file)==0:
print("ERROR: Must input data or file")
else:
#Transpose the data from the start.
data = pyfits.getdata(file).T
ny = self.x_map.shape[1]
nm = self.x_map.shape[0]
nx = self.sim.szx
#Number of "objects"
no = self.square_profile.shape[1]
extracted_flux = np.zeros( (nm,ny,no) )
extracted_var = np.zeros( (nm,ny,no) )
extracted_covar = np.zeros( (nm,ny-1,no) )
#Assuming that the data are in photo-electrons, construct a simple model for the
#pixel inverse variance.
pixel_inv_var = 1.0/(np.maximum(data,0) + rnoise**2)
pixel_inv_var[badpix]=0.0
#Loop through all orders then through all y pixels.
for i in range(nm):
print("Extracting order index: {0:d}".format(i))
#Based on the profile we're using, create the local offsets and profile vectors
if lenslet_profile == 'sim':
offsets = self.sim_offsets[:,i]
profile = self.sim_profile
else:
print("Only sim lenslet profile available for 2D extraction so far...")
                raise UserWarning
nx_cutout = 2*int( (np.max(offsets) - np.min(offsets))/2 ) + 2
ny_cutout = 2*int(nx_cutout * np.nanmax(np.abs(self.slit_tilt)) / 2) + 3
for j in range(ny):
phi = np.zeros( (ny_cutout,nx_cutout,no) )
phi1d = np.zeros( (ny_cutout,nx_cutout,no) )
#Check for NaNs
if self.x_map[i,j] != self.x_map[i,j]:
extracted_var[i,j,:] = np.nan
continue
#Create our column cutout for the data and the PSF
x_ix = int(self.x_map[i,j]) - nx_cutout//2 + np.arange(nx_cutout,dtype=int) + nx//2
y_ix = j + np.arange(ny_cutout, dtype=int) - ny_cutout//2
for k in range(no):
x_prof = np.interp(x_ix - self.x_map[i,j] - nx//2, offsets, profile[:,k])
y_pix = (x_ix - self.x_map[i,j] - nx//2) * self.slit_tilt[i,j] + ny_cutout//2
frac_y_pix = y_pix - y_pix.astype(int)
subx_ix = np.arange(nx_cutout,dtype=int)
phi[y_pix.astype(int),subx_ix,k] = (1-frac_y_pix)*x_prof
phi[y_pix.astype(int)+1,subx_ix,k] = frac_y_pix*x_prof
phi[:,:,k] /= np.sum(phi[:,:,k])
x_prof /= np.sum(x_prof)
phi1d[:,:,k] = np.tile(x_prof,ny_cutout).reshape( (ny_cutout, nx_cutout) )
#Deal with edge effects...
ww = np.where( (x_ix >= nx) | (x_ix < 0) )[0]
x_ix[ww]=0
phi[:,ww,:]=0.0
phi1d[:,ww,:]=0.0
ww = np.where( (y_ix >= ny) | (y_ix < 0) )[0]
y_ix[ww]=0
phi[ww,:,:]=0.0
xy = np.meshgrid(y_ix, x_ix, indexing='ij')
#Cut out our data and inverse variance.
col_data = data[xy].flatten()
col_inv_var = pixel_inv_var[xy].flatten()
#Fill in the "c" matrix and "b" vector from Sharp and Birchall equation 9
#Simplify things by writing the sum in the computation of "b" as a matrix
#multiplication. We can do this because we're content to invert the
#(small) matrix "c" here. Equation 17 from Sharp and Birchall
#doesn't make a lot of sense... so lets just calculate the variance in the
#simple explicit way.
col_inv_var_mat = np.reshape(col_inv_var.repeat(no), (ny_cutout*nx_cutout,no) )
phi = phi.reshape( (ny_cutout*nx_cutout,no) )
phi1d = phi1d.reshape( (ny_cutout*nx_cutout,no) )
b_mat = phi * col_inv_var_mat
c_mat = np.dot(phi.T,phi1d*col_inv_var_mat)
pixel_weights = np.dot(b_mat,np.linalg.inv(c_mat))
# if (j==1000):
# pdb.set_trace()
extracted_flux[i,j,:] = np.dot(col_data,pixel_weights)
extracted_var[i,j,:] = np.dot(1.0/np.maximum(col_inv_var,1e-12),pixel_weights**2)
if (j > 0):
extracted_covar[i,j-1,:] = np.dot(1.0/np.maximum(col_inv_var,1e-12),pixel_weights* \
np.roll(last_pixel_weights,-nx_cutout, axis=0))
last_pixel_weights = pixel_weights.copy()
# if (j > 591):
# pdb.set_trace()
if (deconvolve):
#Create the diagonals of the matrix Q gradually, using the Taylor approximation for
#the matrix inverse.
#(Bolton and Schlegel 2009, equation 10)
#D = diag(C)
#A = D^{-1/2} (C-D) D^{-1/2}, so C = D^{1/2}(I + A)D^{1/2}
#Then if Q = (I - 1/2 A + 3/8 A^2) D^{-1/2}
#... then C^{-1} = QQ, approximately.
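            #The 1, -1/2, 3/8 coefficients are the start of the binomial series
            #    (I + A)^(-1/2) = I - (1/2)A + (3/8)A^2 - ...,
            #truncated after the quadratic term.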
#Note that all of this effort doesn't really seem to achieve much at all in practice...
#an extremely marginal improvement in resolution... but at least formal pixel-to-pixel
#data independence is returned.
extracted_sig = np.sqrt(extracted_var)
a_diag_p1 = extracted_covar/extracted_sig[:,:-1,:]/extracted_sig[:,1:,:]
# a_diag_m1 = extracted_covar/extracted_var[:,1:,:]
Q_diag = np.ones( (nm,ny,no) )
Q_diag[:,:-1,:] += 3/8.0*a_diag_p1**2
Q_diag[:,1:,:] += 3/8.0*a_diag_p1**2
# Q_diag[:,:-1,:] += 3/8.0*a_diag_p1*a_diag_m1
# Q_diag[:,1:,:] += 3/8.0*a_diag_p1*a_diag_m1
Q_diag /= extracted_sig
extracted_sqrtsig = np.sqrt(extracted_sig)
Q_diag_p2 = 3/8.0*a_diag_p1[:,:-1,:]*a_diag_p1[:,1:,:]/extracted_sqrtsig[:,2:,:]/extracted_sqrtsig[:,:-2,:]
# Q_diag_m2 = 3/8.0*a_diag_m1[:,:-1,:]*a_diag_m1[:,1:,:]/extracted_sig[:,:-2,:]
# Q_diag_m1 = -0.5*a_diag_m1/extracted_sig[:,:-1,:]
Q_diag_p1 = -0.5*a_diag_p1/extracted_sqrtsig[:,1:,:]/extracted_sqrtsig[:,:-1,:]
#The approximation doesn't seem to be quite right, with the ~3% uncertainty on the diagonal of cinv, when there should
#only be a ~1% uncertainty (obtained by going to the next term in the Taylor expansion). But pretty close...
#Q = np.diag(Q_diag[0,:,0]) + np.diag(Q_diag_m1[0,:,0],k=-1) + np.diag(Q_diag_p1[0,:,0],k=+1) + np.diag(Q_diag_p2[0,:,0],k=+2) + np.diag(Q_diag_m2[0,:,0],k=-2)
#cinv_approx = np.dot(Q,Q)
#cinv = np.diag(extracted_var[0,:,0]) + np.diag(extracted_covar[0,:,0],k=1) + np.diag(extracted_covar[0,:,0],k=-1)
#cinv = np.linalg.inv(cinv)
#Now we have a sparse matrix with 5 terms. We need to sum down the rows, ignoring the
#edge pixels
# s_vect = Q_diag[:,2:-2,:] + Q_diag_p1[:,1:-2,:] + Q_diag_m1[:,2:-1,:] + Q_diag_p2[:,:-2,:] + Q_diag_m2[:,2:,:]
s_vect = Q_diag.copy()
s_vect[:,:-1,:] += Q_diag_p1
s_vect[:,:-2,:] += Q_diag_p2
s_vect[:,1:,:] += Q_diag_p1
s_vect[:,2:,:] += Q_diag_p2
new_var = 1.0/s_vect**2
new_flux = extracted_flux*Q_diag/s_vect
new_flux[:,:-1,:] += extracted_flux[:,1:,:]*Q_diag_p1/s_vect[:,1:,:]
new_flux[:,:-2,:] += extracted_flux[:,2:,:]*Q_diag_p2/s_vect[:,2:,:]
new_flux[:,1:,:] += extracted_flux[:,:-1,:]*Q_diag_p1/s_vect[:,:-1,:]
new_flux[:,2:,:] += extracted_flux[:,:-2,:]*Q_diag_p2/s_vect[:,:-2,:]
#Fill in the Variance and Flux arrays with NaNs, so that the (not computed) edges
#are undefined.
# new_flux = np.empty_like(extracted_flux)
# new_var = np.empty_like(extracted_var)
# new_flux[:,:,:]=np.nan
# new_var[:,:,:]=np.nan
#Now fill in the arrays.
# new_var[:,2:-2,:] = 1.0/s_vect**2
# new_flux[:,2:-2,:] = extracted_flux[:,2:-2,:]*Q_diag[:,2:-2,:]/s_vect
#
# new_flux[:,2:-2,:] += extracted_flux[:,1:-3,:]*Q_diag_p1[:,1:-2,:]/s_vect
# new_flux[:,2:-2,:] += extracted_flux[:,3:-1,:]*Q_diag_p1[:,2:-1,:]/s_vect
# new_flux[:,2:-2,:] += extracted_flux[:,:-4,:] *Q_diag_p2[:,:-2,:]/s_vect
# new_flux[:,2:-2,:] += extracted_flux[:,4:,:] *Q_diag_p2[:,2:,:]/s_vect
return new_flux, new_var
else:
return extracted_flux, extracted_var
|
mikeireland/pyghost
|
pyghost/extract.py
|
Python
|
mit
| 20,005 | 0.024144 |
class IPParseError(Exception):
pass
class ZoneNotFoundError(Exception):
pass
class InvalidInputError(Exception):
pass
|
crustymonkey/r53-dyndns
|
libr53dyndns/errors.py
|
Python
|
gpl-2.0
| 133 | 0.015038 |
# -*- coding: utf-8 -*-
# Code for console encoding differences. Don't mind it.
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import CashbillService, PopbillException
cashbillService = CashbillService(testValue.LinkID, testValue.SecretKey)
cashbillService.IsTest = testValue.IsTest
cashbillService.IPRestrictOnOff = testValue.IPRestrictOnOff
cashbillService.UseStaticIP = testValue.UseStaticIP
cashbillService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Checks the billing information of the linked member's cash receipt (Cashbill) API service.
- https://docs.popbill.com/cashbill/python/api#GetChargeInfo
'''
try:
print("=" * 15 + " 과금정보 확인 " + "=" * 15)
    # Popbill member business registration number
CorpNum = testValue.testCorpNum
    # Popbill member ID
UserID = testValue.testUserID
response = cashbillService.getChargeInfo(CorpNum, UserID)
print(" unitCost (발행단가) : %s" % response.unitCost)
print(" chargeMethod (과금유형) : %s" % response.chargeMethod)
print(" rateSystem (과금제도) : %s" % response.rateSystem)
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
|
linkhub-sdk/popbill.cashbill.example.py
|
getChargeInfo.py
|
Python
|
mit
| 1,259 | 0.001747 |
# fmt: off
"""
This file holds code for a Distributed Pytorch + Tune page in the docs.
FIXME: We switched our code formatter from YAPF to Black. Check if we can enable code
formatting on this module and update the paragraph below. See issue #21318.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
import torch
import torch.nn as nn
from ray.tune.utils import merge_dicts
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import ray
from ray import tune
from ray.util.sgd.torch import TorchTrainer, TrainingOperator
from ray.util.sgd.utils import BATCH_SIZE
from ray.util.sgd.torch.examples.train_example import LinearDataset
def model_creator(config):
return nn.Linear(1, 1)
def optimizer_creator(model, config):
"""Returns optimizer."""
return torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-4))
def data_creator(config):
"""Returns training dataloader, validation dataloader."""
train_dataset = LinearDataset(2, 5)
val_dataset = LinearDataset(2, 5, size=400)
train_loader = DataLoader(train_dataset, batch_size=config[BATCH_SIZE])
validation_loader = DataLoader(val_dataset, batch_size=config[BATCH_SIZE])
return train_loader, validation_loader
def scheduler_creator(optimizer, config):
"""Returns scheduler. We are using a ReduceLROnPleateau scheduler."""
scheduler = ReduceLROnPlateau(optimizer, mode="min")
return scheduler
# __torch_tune_example__
def tune_example(operator_cls, num_workers=1, use_gpu=False):
TorchTrainable = TorchTrainer.as_trainable(
training_operator_cls=operator_cls,
num_workers=num_workers,
use_gpu=use_gpu,
config={BATCH_SIZE: 128}
)
analysis = tune.run(
TorchTrainable,
num_samples=3,
config={"lr": tune.grid_search([1e-4, 1e-3])},
stop={"training_iteration": 2},
verbose=1)
return analysis.get_best_config(metric="val_loss", mode="min")
# __end_torch_tune_example__
# __torch_tune_manual_lr_example__
def tune_example_manual(operator_cls, num_workers=1, use_gpu=False):
def step(trainer, info: dict):
"""Define a custom training loop for tune.
This is needed because we want to manually update our scheduler.
"""
train_stats = trainer.train(profile=True)
validation_stats = trainer.validate(profile=True)
# Manually update our scheduler with the given metric.
trainer.update_scheduler(metric=validation_stats["val_loss"])
all_stats = merge_dicts(train_stats, validation_stats)
return all_stats
TorchTrainable = TorchTrainer.as_trainable(
override_tune_step=step,
training_operator_cls=operator_cls,
num_workers=num_workers,
use_gpu=use_gpu,
scheduler_step_freq="manual",
config={BATCH_SIZE: 128}
)
analysis = tune.run(
TorchTrainable,
num_samples=3,
config={"lr": tune.grid_search([1e-4, 1e-3])},
stop={"training_iteration": 2},
verbose=1)
return analysis.get_best_config(metric="val_loss", mode="min")
# __end_torch_tune_manual_lr_example__
def get_custom_training_operator(lr_reduce_on_plateau=False):
return TrainingOperator.from_creators(
model_creator=model_creator, optimizer_creator=optimizer_creator,
data_creator=data_creator, loss_creator=nn.MSELoss,
scheduler_creator=scheduler_creator if lr_reduce_on_plateau
else None)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--address",
type=str,
help="the address to use for Ray")
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=1,
help="Sets number of workers for training.")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--lr-reduce-on-plateau",
action="store_true",
default=False,
help="If enabled, use a ReduceLROnPlateau scheduler. If not set, "
"no scheduler is used."
)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=3)
elif args.server_address:
ray.init(f"ray://{args.server_address}")
else:
ray.init(address=args.address)
CustomTrainingOperator = get_custom_training_operator(
args.lr_reduce_on_plateau)
if not args.lr_reduce_on_plateau:
tune_example(CustomTrainingOperator, num_workers=args.num_workers,
use_gpu=args.use_gpu)
else:
tune_example_manual(CustomTrainingOperator,
num_workers=args.num_workers, use_gpu=args.use_gpu)
|
ray-project/ray
|
python/ray/util/sgd/torch/examples/tune_example.py
|
Python
|
apache-2.0
| 5,267 | 0.00019 |
"""Test MLPerf logging.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
import pytest
from tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
class TestMLPerfLog(object):
"""Test mlperf log."""
def test_format(self):
msg = mlp_log.mlperf_format('foo_key', {'whiz': 'bang'})
parts = msg.split()
assert parts[0] == ':::MLL'
assert float(parts[1]) > 10
assert parts[2] == 'foo_key:'
j = json.loads(' '.join(parts[3:]))
assert j['value'] == {'whiz': 'bang'}
assert j['metadata']['lineno'] == 21
assert 'test_mlp_log' in j['metadata']['file']
if __name__ == '__main__':
sys.exit(pytest.main())
|
mlperf/training_results_v0.7
|
Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/tf2_common/utils/mlp_log/test_mlp_log.py
|
Python
|
apache-2.0
| 739 | 0.00406 |
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from enum import IntEnum
from gi.repository import Gdk
class Key(IntEnum):
UNKNOWN = -1
Up = Gdk.KEY_Up
Right = Gdk.KEY_Right
Down = Gdk.KEY_Down
Left = Gdk.KEY_Left
@classmethod
def from_value(cls, value: int) -> 'Key':
try:
return Key(value)
except ValueError:
return Key.UNKNOWN
class Modifier:
SHIFT = int(Gdk.ModifierType.SHIFT_MASK)
class KeyEvent:
def __init__(self, key: Key, modifier: int):
self.key = key
self.modifier = modifier
|
ricotabor/opendrop
|
opendrop/app/keyboard.py
|
Python
|
gpl-2.0
| 1,940 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-13 03:20
from __future__ import unicode_literals
import django.contrib.postgres.fields.ranges
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0019_auto_20170613_0241'),
]
operations = [
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('time_range', django.contrib.postgres.fields.ranges.DateTimeRangeField()),
('comment', models.TextField()),
('sensor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Sensor')),
('station', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Station')),
],
),
]
|
qubs/data-centre
|
climate_data/migrations/0020_annotation.py
|
Python
|
apache-2.0
| 1,145 | 0.003493 |
#coding:utf-8
bind = 'unix:/var/run/gunicorn.sock'
workers = 4
# you should change this
user = 'root'
# you may prefer 'error' here instead of 'debug'
loglevel = 'debug'
errorlog = '-'
logfile = '/var/log/gunicorn/debug.log'
timeout = 300
secure_scheme_headers = {
'X-SCHEME': 'https',
}
x_forwarded_for_header = 'X-FORWARDED-FOR'
|
chenke91/ckPermission
|
settings.py
|
Python
|
mit
| 313 | 0.003195 |
"""Entities: things that exist in the world."""
from .gfx import GraphicsGroup
from .util import ir
class Entity (object):
"""A thing that exists in the world.
Entity()
Currently, an entity is just a container of graphics.
"""
def __init__ (self):
#: The :class:`World <engine.game.World>` this entity is in. This is
#: set by the world when the entity is added or removed.
self.world = None
#: :class:`GraphicsGroup <engine.gfx.container.GraphicsGroup>`
#: containing the entity's graphics, with ``x=0``, ``y=0``.
self.graphics = GraphicsGroup()
def added (self):
"""Called whenever the entity is added to a world.
This is called after :attr:`world` has been changed to the new world.
"""
pass
def update (self):
"""Called every frame to makes any necessary changes."""
pass
|
ikn/o
|
game/engine/entity.py
|
Python
|
gpl-3.0
| 884 | 0.003394 |
# -*- encoding:utf8 -*-
from model.parser import Parser
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class Drive(IResponseBase):
def hear_regex(self, **kwargs):
lists = Parser().get_keyword_list(expand=True)
print("Lists : %r" % lists)
return "^({0})$".format("|".join(lists))
def response(self, **kwargs):
drive_kwargs = {
'document_id': Parser().get_document_id(kwargs.get('text')),
'export_type': 'text/plain'
}
return GoogleDrive().retrieve_content(**drive_kwargs)
|
supistar/Botnyan
|
plugins/drive.py
|
Python
|
mit
| 603 | 0 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingMedian/cycle_0/ar_/test_artificial_32_Logit_MovingMedian_0__100.py
|
Python
|
bsd-3-clause
| 263 | 0.087452 |
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ers_backend.settings')
from django.conf import settings
app = Celery('dataset_manager', backend="redis://localhost")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
dumoulinj/ers
|
ers_backend/ers_backend/celery.py
|
Python
|
mit
| 607 | 0.001647 |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 - 2018 FrostLuma
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from mousey import commands
from mousey.utils import haste, Timer
class Context(commands.Context):
"""
Provides context while executing commands and utility methods.
Attributes
----------
timer : Timer
A timer which measures how long the current command takes to execute.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.timer = Timer()
async def send(self, content=None, *args, **kwargs):
if content is not None and len(content) > 1999:
link = await haste(content, session=self.bot.session)
content = f'Content too long! <{link}>'
return await super().send(content, *args, **kwargs)
# todo
@property
def red_tick(self):
return '\N{NEGATIVE SQUARED CROSS MARK}'
async def ok(self):
await self.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')
@property
def color(self):
return self.me.color
|
FrostLuma/Mousey
|
mousey/bot/context.py
|
Python
|
mit
| 2,070 | 0 |
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
register = template.Library()
### Show an update instance ###
@register.inclusion_tag("feeds/update.html", takes_context=True)
def show_update(context, update):
feed_object = update.feed.feed_object
update_object = update.content_object
icon = object_content = None
feed_object_link = '<a class="update-object" href="%s">%s</a>' % (feed_object.get_absolute_url(), feed_object)
update_object_link = None
if update.action_description.startswith('create'):
icon = 'create'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('edit'):
icon = 'edit'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('added to the discussion'):
icon = 'comment'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('added the paper'):
icon = 'paper'
update_object_link = '<a class="update-object" href="%s">%s</a>' % (update_object.get_absolute_url(), update_object)
update_line = update.action_description % (update_object_link, feed_object_link)
elif update.action_description.startswith('started following'):
icon = 'follow'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('resolved'):
icon = 'resolve'
update_line = update.action_description % feed_object_link
else:
icon = 'settings'
update_line = update.action_description % feed_object_link
return {'update': update, 'update_user': update.user, 'icon': icon,
'feed_object': feed_object, 'update_object': update_object, 'update_line': update_line,
'update_content': update.update_content, 'STATIC_URL': settings.STATIC_URL}
### Renders the follow button for any object ###
@register.inclusion_tag("feeds/follow_button.html", takes_context=True)
def follow_button(context, content_object, extra_text=None):
user = context['request'].user
feed = content_object.feed
subscription_url = reverse('feeds_subscription', kwargs={'feed_id': feed.id})
if user.is_authenticated():
subscription = feed.is_user_following(user)
else:
subscription = None
return {'subscription_url': subscription_url, 'subscription': subscription,
'extra_text': extra_text, 'feed': feed,
'STATIC_URL': settings.STATIC_URL}
|
caseywstark/colab
|
colab/apps/object_feeds/templatetags/object_feeds_tags.py
|
Python
|
mit
| 2,666 | 0.007127 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MaxAbsScaler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("MaxAbsScalerExample")\
.getOrCreate()
# $example on$
dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")
# Compute summary statistics and generate MaxAbsScalerModel
scalerModel = scaler.fit(dataFrame)
# rescale each feature to range [-1, 1].
scaledData = scalerModel.transform(dataFrame)
scaledData.show()
# $example off$
spark.stop()
|
mrchristine/spark-examples-dbc
|
src/main/python/ml/max_abs_scaler_example.py
|
Python
|
apache-2.0
| 1,515 | 0.00066 |
# -*- coding: utf-8 -*-
"""The scwapi package"""
|
eteamin/spell_checker_web_api
|
scwapi/__init__.py
|
Python
|
gpl-3.0
| 49 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-15 23:16
from __future__ import unicode_literals
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('gardens', '0023_auto_20180215_2314'),
]
operations = [
migrations.RemoveField(
model_name='maintenancephoto',
name='main',
),
migrations.AddField(
model_name='maintenancephoto',
name='large',
field=image_cropping.fields.ImageRatioField('image', '600x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='large'),
),
]
|
bengosney/rhgd3
|
gardens/migrations/0024_auto_20180215_2316.py
|
Python
|
gpl-3.0
| 759 | 0.001318 |
# Imports operators dynamically while keeping the package API clean,
# abstracting the underlying modules
from airflow.utils.helpers import import_module_attrs as _import_module_attrs
# These need to be integrated first as other operators depend on them
_import_module_attrs(globals(), {
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
})
_operators = {
'bash_operator': ['BashOperator'],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'pig_operator': ['PigOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dagrun_operator': ['TriggerDagRunOperator'],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'sensors': [
'BaseSensorOperator',
'ExternalTaskSensor',
'HdfsSensor',
'HivePartitionSensor',
'HttpSensor',
'MetastorePartitionSensor',
'S3KeySensor',
'S3PrefixSensor',
'SqlSensor',
'TimeDeltaSensor',
'TimeSensor',
'WebHdfsSensor',
],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
'presto_to_mysql': ['PrestoToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
'oracle_operator': ['OracleOperator']
}
_import_module_attrs(globals(), _operators)
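# After the dynamic import above, each listed class becomes an attribute of this
# package, so e.g. ``from airflow.operators import BashOperator`` resolves even
# though BashOperator is defined in bash_operator.py.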
from airflow.models import BaseOperator
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import operators as _operators
for _operator in _operators:
globals()[_operator.__name__] = _operator
|
wxiang7/airflow
|
airflow/operators/__init__.py
|
Python
|
apache-2.0
| 2,497 | 0.0004 |
#!/usr/bin/python
import ldns
pkt = ldns.ldns_pkt.new_query_frm_str("www.google.com",ldns.LDNS_RR_TYPE_ANY, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_QR | ldns.LDNS_AA)
rra = ldns.ldns_rr.new_frm_str("www.google.com. IN A 192.168.1.1",300)
rrb = ldns.ldns_rr.new_frm_str("www.google.com. IN TXT Some\ Description",300)
list = ldns.ldns_rr_list()
if (rra): list.push_rr(rra)
if (rrb): list.push_rr(rrb)
pkt.push_rr_list(ldns.LDNS_SECTION_ANSWER, list)
print("Packet:")
print(pkt)
|
fangdingjun/dnsproxy
|
third-part/ldns-1.6.17/contrib/python/examples/python3/ldns-newpkt.py
|
Python
|
gpl-3.0
| 476 | 0.014706 |
from util import hash256, hash_pks
from copy import deepcopy
class AggregationInfo:
"""
AggregationInfo represents information of how a tree of aggregate
    signatures was created. Different trees will result in different
signatures, due to exponentiations required for security.
An AggregationInfo is represented as a map from (message_hash, pk)
to exponents. When verifying, a verifier will take the signature,
along with this map, and raise each public key to the correct
exponent, and multiply the pks together, for identical messages.
"""
def __init__(self, tree, message_hashes, public_keys):
self.tree = tree
self.message_hashes = message_hashes
self.public_keys = public_keys
def empty(self):
return not self.tree
def __eq__(self, other):
return not self.__lt__(other) and not other.__lt__(self)
def __lt__(self, other):
"""
Compares two AggregationInfo objects, this is necessary for sorting
them. Comparison is done by comparing (message hash, pk, exponent)
"""
combined = [(self.message_hashes[i], self.public_keys[i],
self.tree[(self.message_hashes[i], self.public_keys[i])])
for i in range(len(self.public_keys))]
combined_other = [(other.message_hashes[i], other.public_keys[i],
other.tree[(other.message_hashes[i],
other.public_keys[i])])
for i in range(len(other.public_keys))]
for i in range(max(len(combined), len(combined_other))):
if i == len(combined):
return True
if i == len(combined_other):
return False
if combined[i] < combined_other[i]:
return True
if combined_other[i] < combined[i]:
return False
return False
def __str__(self):
ret = ""
for key, value in self.tree.items():
ret += ("(" + key[0].hex() + "," + key[1].serialize().hex()
+ "):\n" + hex(value) + "\n")
return ret
def __deepcopy__(self, memo):
new_tree = deepcopy(self.tree, memo)
new_mh = deepcopy(self.message_hashes, memo)
new_pubkeys = deepcopy(self.public_keys, memo)
return AggregationInfo(new_tree, new_mh, new_pubkeys)
@staticmethod
def from_msg_hash(public_key, message_hash):
tree = {}
tree[(message_hash, public_key)] = 1
return AggregationInfo(tree, [message_hash], [public_key])
@staticmethod
def from_msg(pk, message):
return AggregationInfo.from_msg_hash(pk, hash256(message))
@staticmethod
def simple_merge_infos(aggregation_infos):
"""
Infos are just merged together with no addition of exponents,
since they are disjoint
"""
new_tree = {}
for info in aggregation_infos:
new_tree.update(info.tree)
mh_pubkeys = [k for k, v in new_tree.items()]
mh_pubkeys.sort()
message_hashes = [message_hash for (message_hash, public_key)
in mh_pubkeys]
public_keys = [public_key for (message_hash, public_key)
in mh_pubkeys]
return AggregationInfo(new_tree, message_hashes, public_keys)
@staticmethod
def secure_merge_infos(colliding_infos):
"""
Infos are merged together with combination of exponents
"""
# Groups are sorted by message then pk then exponent
        # Each info object (and all of its exponents) will be
# exponentiated by one of the Ts
colliding_infos.sort()
sorted_keys = []
for info in colliding_infos:
for key, value in info.tree.items():
sorted_keys.append(key)
sorted_keys.sort()
sorted_pks = [public_key for (message_hash, public_key)
in sorted_keys]
computed_Ts = hash_pks(len(colliding_infos), sorted_pks)
# Group order, exponents can be reduced mod the order
order = sorted_pks[0].value.ec.n
new_tree = {}
for i in range(len(colliding_infos)):
for key, value in colliding_infos[i].tree.items():
if key not in new_tree:
# This message & pk have not been included yet
new_tree[key] = (value * computed_Ts[i]) % order
else:
# This message and pk are already included, so multiply
addend = value * computed_Ts[i]
new_tree[key] = (new_tree[key] + addend) % order
mh_pubkeys = [k for k, v in new_tree.items()]
mh_pubkeys.sort()
message_hashes = [message_hash for (message_hash, public_key)
in mh_pubkeys]
public_keys = [public_key for (message_hash, public_key)
in mh_pubkeys]
return AggregationInfo(new_tree, message_hashes, public_keys)
@staticmethod
def merge_infos(aggregation_infos):
messages = set()
colliding_messages = set()
for info in aggregation_infos:
messages_local = set()
for key, value in info.tree.items():
if key[0] in messages and key[0] not in messages_local:
colliding_messages.add(key[0])
messages.add(key[0])
messages_local.add(key[0])
if len(colliding_messages) == 0:
return AggregationInfo.simple_merge_infos(aggregation_infos)
colliding_infos = []
non_colliding_infos = []
for info in aggregation_infos:
info_collides = False
for key, value in info.tree.items():
if key[0] in colliding_messages:
info_collides = True
colliding_infos.append(info)
break
if not info_collides:
non_colliding_infos.append(info)
combined = AggregationInfo.secure_merge_infos(colliding_infos)
non_colliding_infos.append(combined)
return AggregationInfo.simple_merge_infos(non_colliding_infos)
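# Illustrative usage sketch of the merge logic above. pk1 and pk2 are assumed
# to be PublicKey objects from this library; they are not constructed here.
#
#     info_a = AggregationInfo.from_msg(pk1, b"hello")
#     info_b = AggregationInfo.from_msg(pk2, b"hello")  # same message -> collision
#     info_c = AggregationInfo.from_msg(pk2, b"world")  # disjoint message
#     merged = AggregationInfo.merge_infos([info_a, info_b, info_c])
#     # Colliding entries are combined via secure_merge_infos (exponents mixed
#     # in), the rest via simple_merge_infos.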
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
zcoinofficial/zcoin
|
src/bls-signatures/python-impl/aggregation_info.py
|
Python
|
mit
| 6,831 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TOKEN-BASED AUTH MIDDLEWARE
This WSGI component:
* Verifies that incoming client requests have valid tokens by validating
tokens with the auth service.
* Rejects unauthenticated requests UNLESS it is in 'delay_auth_decision'
mode, which means the final decision is delegated to the downstream WSGI
component (usually the OpenStack service)
* Collects and forwards identity information based on a valid token
such as user name, tenant, etc
Refer to: http://keystone.openstack.org/middlewarearchitecture.html
HEADERS
-------
* Headers starting with HTTP\_ are standard http headers
* Headers starting with HTTP_X are extended http headers
Coming in from initial call from client or customer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_AUTH_TOKEN
The client token being passed in.
HTTP_X_STORAGE_TOKEN
The client token being passed in (legacy Rackspace use) to support
swift/cloud files
Used for communication between components
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
WWW-Authenticate
HTTP header returned to a user indicating which endpoint to use
to retrieve a new token
What we add to the request for use by the OpenStack service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_IDENTITY_STATUS
'Confirmed' or 'Invalid'
The underlying service will only see a value of 'Invalid' if the Middleware
is configured to run in 'delay_auth_decision' mode
HTTP_X_DOMAIN_ID
Identity service managed unique identifier, string. Only present if
this is a domain-scoped v3 token.
HTTP_X_DOMAIN_NAME
Unique domain name, string. Only present if this is a domain-scoped
v3 token.
HTTP_X_PROJECT_ID
Identity service managed unique identifier, string. Only present if
this is a project-scoped v3 token, or a tenant-scoped v2 token.
HTTP_X_PROJECT_NAME
Project name, unique within owning domain, string. Only present if
this is a project-scoped v3 token, or a tenant-scoped v2 token.
HTTP_X_PROJECT_DOMAIN_ID
Identity service managed unique identifier of owning domain of
project, string. Only present if this is a project-scoped v3 token. If
this variable is set, this indicates that the PROJECT_NAME can only
be assumed to be unique within this domain.
HTTP_X_PROJECT_DOMAIN_NAME
Name of owning domain of project, string. Only present if this is a
project-scoped v3 token. If this variable is set, this indicates that
the PROJECT_NAME can only be assumed to be unique within this domain.
HTTP_X_USER_ID
Identity-service managed unique identifier, string
HTTP_X_USER_NAME
User identifier, unique within owning domain, string
HTTP_X_USER_DOMAIN_ID
Identity service managed unique identifier of owning domain of
user, string. If this variable is set, this indicates that the USER_NAME
can only be assumed to be unique within this domain.
HTTP_X_USER_DOMAIN_NAME
Name of owning domain of user, string. If this variable is set, this
indicates that the USER_NAME can only be assumed to be unique within
this domain.
HTTP_X_ROLES
Comma delimited list of case-sensitive role names
HTTP_X_SERVICE_CATALOG
json encoded keystone service catalog (optional).
HTTP_X_TENANT_ID
*Deprecated* in favor of HTTP_X_PROJECT_ID
Identity service managed unique identifier, string. For v3 tokens, this
will be set to the same value as HTTP_X_PROJECT_ID
HTTP_X_TENANT_NAME
*Deprecated* in favor of HTTP_X_PROJECT_NAME
Project identifier, unique within owning domain, string. For v3 tokens,
this will be set to the same value as HTTP_X_PROJECT_NAME
HTTP_X_TENANT
*Deprecated* in favor of HTTP_X_TENANT_ID and HTTP_X_TENANT_NAME
Keystone-assigned unique identifier, string. For v3 tokens, this
will be set to the same value as HTTP_X_PROJECT_ID
HTTP_X_USER
*Deprecated* in favor of HTTP_X_USER_ID and HTTP_X_USER_NAME
User name, unique within owning domain, string
HTTP_X_ROLE
*Deprecated* in favor of HTTP_X_ROLES
Will contain the same values as HTTP_X_ROLES.
OTHER ENVIRONMENT VARIABLES
---------------------------
keystone.token_info
Information about the token discovered in the process of
validation. This may include extended information returned by the
Keystone token validation call, as well as basic information about
the tenant and user.
"""
import datetime
import logging
import os
import requests
import stat
import tempfile
import time
import urllib
import netaddr
import six
from keystoneclient.common import cms
from keystoneclient.middleware import memcache_crypt
from keystoneclient.openstack.common import jsonutils
from keystoneclient.openstack.common import memorycache
from keystoneclient.openstack.common import timeutils
from keystoneclient import utils
CONF = None
# to pass gate before oslo-config is deployed everywhere,
# try application copies first
for app in 'nova', 'glance', 'quantum', 'cinder':
try:
cfg = __import__('%s.openstack.common.cfg' % app,
fromlist=['%s.openstack.common' % app])
# test which application middleware is running in
if hasattr(cfg, 'CONF') and 'config_file' in cfg.CONF:
CONF = cfg.CONF
break
except ImportError:
pass
if not CONF:
from oslo.config import cfg
CONF = cfg.CONF
# alternative middleware configuration in the main application's
# configuration file e.g. in nova.conf
# [keystone_authtoken]
# auth_host = 127.0.0.1
# auth_port = 35357
# auth_protocol = http
# admin_tenant_name = admin
# admin_user = admin
# admin_password = badpassword
# When deploying the Keystone auth_token middleware with Swift, the user may
# elect to use Swift memcache instead of the local Keystone memcache. Swift
# memcache is passed in from the request environment and it is identified by
# the 'swift.cache' key. However, it could be different, depending on deployment.
# To use Swift memcache, you must set the 'cache' option to the environment
# key where the Swift cache object is stored.
opts = [
cfg.StrOpt('auth_admin_prefix',
default='',
help='Prefix to prepend at the beginning of the path'),
cfg.StrOpt('auth_host',
default='127.0.0.1',
help='Host providing the admin Identity API endpoint'),
cfg.IntOpt('auth_port',
default=35357,
help='Port of the admin Identity API endpoint'),
cfg.StrOpt('auth_protocol',
default='https',
help='Protocol of the admin Identity API endpoint'
'(http or https)'),
cfg.StrOpt('auth_uri',
default=None,
# FIXME(dolph): should be default='http://127.0.0.1:5000/v2.0/',
# or (depending on client support) an unversioned, publicly
# accessible identity endpoint (see bug 1207517)
help='Complete public Identity API endpoint'),
cfg.StrOpt('auth_version',
default=None,
help='API version of the admin Identity API endpoint'),
cfg.BoolOpt('delay_auth_decision',
default=False,
help='Do not handle authorization requests within the'
' middleware, but delegate the authorization decision to'
' downstream WSGI components'),
cfg.BoolOpt('http_connect_timeout',
default=None,
help='Request timeout value for communicating with Identity'
' API server.'),
cfg.IntOpt('http_request_max_retries',
default=3,
               help='How many times to retry reconnecting when'
               ' communicating with Identity API Server.'),
cfg.StrOpt('http_handler',
default=None,
               help='Allows passing in the name of a fake http_handler'
' callback function used instead of httplib.HTTPConnection or'
' httplib.HTTPSConnection. Useful for unit testing where'
' network is not available.'),
cfg.StrOpt('admin_token',
secret=True,
help='Single shared secret with the Keystone configuration'
' used for bootstrapping a Keystone installation, or otherwise'
' bypassing the normal authentication process.'),
cfg.StrOpt('admin_user',
help='Keystone account username'),
cfg.StrOpt('admin_password',
secret=True,
help='Keystone account password'),
cfg.StrOpt('admin_tenant_name',
default='admin',
help='Keystone service account tenant name to validate'
' user tokens'),
cfg.StrOpt('cache',
default=None,
help='Env key for the swift cache'),
cfg.StrOpt('certfile',
help='Required if Keystone server requires client certificate'),
cfg.StrOpt('keyfile',
help='Required if Keystone server requires client certificate'),
cfg.StrOpt('cafile', default=None,
help='A PEM encoded Certificate Authority to use when '
'verifying HTTPs connections. Defaults to system CAs.'),
cfg.BoolOpt('insecure', default=False, help='Verify HTTPS connections.'),
cfg.StrOpt('signing_dir',
help='Directory used to cache files related to PKI tokens'),
cfg.ListOpt('memcached_servers',
deprecated_name='memcache_servers',
help='If defined, the memcache server(s) to use for'
' caching'),
cfg.IntOpt('token_cache_time',
default=300,
help='In order to prevent excessive requests and validations,'
' the middleware uses an in-memory cache for the tokens the'
' Keystone API returns. This is only valid if memcache_servers'
' is defined. Set to -1 to disable caching completely.'),
cfg.IntOpt('revocation_cache_time',
default=1,
help='Value only used for unit testing'),
cfg.StrOpt('memcache_security_strategy',
default=None,
help='(optional) if defined, indicate whether token data'
' should be authenticated or authenticated and encrypted.'
' Acceptable values are MAC or ENCRYPT. If MAC, token data is'
' authenticated (with HMAC) in the cache. If ENCRYPT, token'
' data is encrypted and authenticated in the cache. If the'
' value is not one of these options or empty, auth_token will'
' raise an exception on initialization.'),
cfg.StrOpt('memcache_secret_key',
default=None,
secret=True,
help='(optional, mandatory if memcache_security_strategy is'
' defined) this string is used for key derivation.')
]
CONF.register_opts(opts, group='keystone_authtoken')
LIST_OF_VERSIONS_TO_ATTEMPT = ['v2.0', 'v3.0']
CACHE_KEY_TEMPLATE = 'tokens/%s'
def will_expire_soon(expiry):
"""Determines if expiration is about to occur.
:param expiry: a datetime of the expected expiration
:returns: boolean : true if expiration is within 30 seconds
"""
soon = (timeutils.utcnow() + datetime.timedelta(seconds=30))
return expiry < soon
def safe_quote(s):
"""URL-encode strings that are not already URL-encoded."""
return urllib.quote(s) if s == urllib.unquote(s) else s
class InvalidUserToken(Exception):
pass
class ServiceError(Exception):
pass
class ConfigurationError(Exception):
pass
class NetworkError(Exception):
pass
class MiniResp(object):
def __init__(self, error_message, env, headers=[]):
# The HEAD method is unique: it must never return a body, even if
# it reports an error (RFC-2616 clause 9.4). We relieve callers
# from varying the error responses depending on the method.
if env['REQUEST_METHOD'] == 'HEAD':
self.body = ['']
else:
self.body = [error_message]
self.headers = list(headers)
self.headers.append(('Content-type', 'text/plain'))
class AuthProtocol(object):
"""Auth Middleware that handles authenticating client calls."""
def __init__(self, app, conf):
self.LOG = logging.getLogger(conf.get('log_name', __name__))
self.LOG.info('Starting keystone auth_token middleware')
self.conf = conf
self.app = app
# delay_auth_decision means we still allow unauthenticated requests
# through and we let the downstream service make the final decision
self.delay_auth_decision = (self._conf_get('delay_auth_decision') in
(True, 'true', 't', '1', 'on', 'yes', 'y'))
# where to find the auth service (we use this to validate tokens)
auth_host = self._conf_get('auth_host')
auth_port = int(self._conf_get('auth_port'))
auth_protocol = self._conf_get('auth_protocol')
self.auth_admin_prefix = self._conf_get('auth_admin_prefix')
self.auth_uri = self._conf_get('auth_uri')
if netaddr.valid_ipv6(auth_host):
# Note(dzyu) it is an IPv6 address, so it needs to be wrapped
# with '[]' to generate a valid IPv6 URL, based on
# http://www.ietf.org/rfc/rfc2732.txt
auth_host = '[%s]' % auth_host
self.request_uri = '%s://%s:%s' % (auth_protocol, auth_host, auth_port)
if self.auth_uri is None:
self.LOG.warning(
'Configuring auth_uri to point to the public identity '
'endpoint is required; clients may not be able to '
'authenticate against an admin endpoint')
# FIXME(dolph): drop support for this fallback behavior as
# documented in bug 1207517
self.auth_uri = self.request_uri
# SSL
self.cert_file = self._conf_get('certfile')
self.key_file = self._conf_get('keyfile')
self.ssl_ca_file = self._conf_get('cafile')
self.ssl_insecure = self._conf_get('insecure')
# signing
self.signing_dirname = self._conf_get('signing_dir')
if self.signing_dirname is None:
self.signing_dirname = tempfile.mkdtemp(prefix='keystone-signing-')
self.LOG.info('Using %s as cache directory for signing certificate' %
self.signing_dirname)
self.verify_signing_dir()
val = '%s/signing_cert.pem' % self.signing_dirname
self.signing_cert_file_name = val
val = '%s/cacert.pem' % self.signing_dirname
self.signing_ca_file_name = val
val = '%s/revoked.pem' % self.signing_dirname
self.revoked_file_name = val
# Credentials used to verify this component with the Auth service since
# validating tokens is a privileged call
self.admin_token = self._conf_get('admin_token')
self.admin_token_expiry = None
self.admin_user = self._conf_get('admin_user')
self.admin_password = self._conf_get('admin_password')
self.admin_tenant_name = self._conf_get('admin_tenant_name')
# Token caching via memcache
self._cache = None
        self._cache_initialized = False # cache already initialized?
# memcache value treatment, ENCRYPT or MAC
self._memcache_security_strategy = \
self._conf_get('memcache_security_strategy')
if self._memcache_security_strategy is not None:
self._memcache_security_strategy = \
self._memcache_security_strategy.upper()
self._memcache_secret_key = \
self._conf_get('memcache_secret_key')
self._assert_valid_memcache_protection_config()
# By default the token will be cached for 5 minutes
self.token_cache_time = int(self._conf_get('token_cache_time'))
self._token_revocation_list = None
self._token_revocation_list_fetched_time = None
self.token_revocation_list_cache_timeout = datetime.timedelta(
seconds=self._conf_get('revocation_cache_time'))
http_connect_timeout_cfg = self._conf_get('http_connect_timeout')
self.http_connect_timeout = (http_connect_timeout_cfg and
int(http_connect_timeout_cfg))
self.auth_version = None
self.http_request_max_retries = \
self._conf_get('http_request_max_retries')
def _assert_valid_memcache_protection_config(self):
if self._memcache_security_strategy:
if self._memcache_security_strategy not in ('MAC', 'ENCRYPT'):
raise Exception('memcache_security_strategy must be '
'ENCRYPT or MAC')
if not self._memcache_secret_key:
                raise Exception('memcache_secret_key must be defined when '
'a memcache_security_strategy is defined')
def _init_cache(self, env):
cache = self._conf_get('cache')
memcache_servers = self._conf_get('memcached_servers')
if cache and env.get(cache, None) is not None:
# use the cache from the upstream filter
self.LOG.info('Using %s memcache for caching token', cache)
self._cache = env.get(cache)
else:
# use Keystone memcache
self._cache = memorycache.get_client(memcache_servers)
self._cache_initialized = True
def _conf_get(self, name):
# try config from paste-deploy first
if name in self.conf:
return self.conf[name]
else:
return CONF.keystone_authtoken[name]
def _choose_api_version(self):
"""Determine the api version that we should use."""
# If the configuration specifies an auth_version we will just
# assume that is correct and use it. We could, of course, check
# that this version is supported by the server, but in case
# there are some problems in the field, we want as little code
# as possible in the way of letting auth_token talk to the
# server.
if self._conf_get('auth_version'):
version_to_use = self._conf_get('auth_version')
self.LOG.info('Auth Token proceeding with requested %s apis',
version_to_use)
else:
version_to_use = None
versions_supported_by_server = self._get_supported_versions()
if versions_supported_by_server:
for version in LIST_OF_VERSIONS_TO_ATTEMPT:
if version in versions_supported_by_server:
version_to_use = version
break
if version_to_use:
self.LOG.info('Auth Token confirmed use of %s apis',
version_to_use)
else:
self.LOG.error(
'Attempted versions [%s] not in list supported by '
'server [%s]',
', '.join(LIST_OF_VERSIONS_TO_ATTEMPT),
', '.join(versions_supported_by_server))
raise ServiceError('No compatible apis supported by server')
return version_to_use
def _get_supported_versions(self):
versions = []
response, data = self._json_request('GET', '/')
if response.status_code == 501:
self.LOG.warning("Old keystone installation found...assuming v2.0")
versions.append("v2.0")
elif response.status_code != 300:
self.LOG.error('Unable to get version info from keystone: %s' %
response.status_code)
raise ServiceError('Unable to get version info from keystone')
else:
try:
for version in data['versions']['values']:
versions.append(version['id'])
except KeyError:
                self.LOG.error(
                    'Invalid version response format from server: %s', data)
raise ServiceError('Unable to parse version response '
'from keystone')
self.LOG.debug('Server reports support for api versions: %s',
', '.join(versions))
return versions
def __call__(self, env, start_response):
"""Handle incoming request.
        Authenticate and send downstream on success. Reject the request if
we can't authenticate.
"""
self.LOG.debug('Authenticating user token')
# initialize memcache if we haven't done so
if not self._cache_initialized:
self._init_cache(env)
try:
self._remove_auth_headers(env)
user_token = self._get_user_token_from_header(env)
token_info = self._validate_user_token(user_token)
env['keystone.token_info'] = token_info
user_headers = self._build_user_headers(token_info)
self._add_headers(env, user_headers)
return self.app(env, start_response)
except InvalidUserToken:
if self.delay_auth_decision:
self.LOG.info(
'Invalid user token - deferring reject downstream')
self._add_headers(env, {'X-Identity-Status': 'Invalid'})
return self.app(env, start_response)
else:
self.LOG.info('Invalid user token - rejecting request')
return self._reject_request(env, start_response)
except ServiceError as e:
self.LOG.critical('Unable to obtain admin token: %s' % e)
resp = MiniResp('Service unavailable', env)
start_response('503 Service Unavailable', resp.headers)
return resp.body
def _remove_auth_headers(self, env):
"""Remove headers so a user can't fake authentication.
:param env: wsgi request environment
"""
auth_headers = (
'X-Identity-Status',
'X-Domain-Id',
'X-Domain-Name',
'X-Project-Id',
'X-Project-Name',
'X-Project-Domain-Id',
'X-Project-Domain-Name',
'X-User-Id',
'X-User-Name',
'X-User-Domain-Id',
'X-User-Domain-Name',
'X-Roles',
'X-Service-Catalog',
# Deprecated
'X-User',
'X-Tenant-Id',
'X-Tenant-Name',
'X-Tenant',
'X-Role',
)
self.LOG.debug('Removing headers from request environment: %s' %
','.join(auth_headers))
self._remove_headers(env, auth_headers)
def _get_user_token_from_header(self, env):
"""Get token id from request.
:param env: wsgi request environment
:return token id
:raises InvalidUserToken if no token is provided in request
"""
token = self._get_header(env, 'X-Auth-Token',
self._get_header(env, 'X-Storage-Token'))
if token:
return token
else:
if not self.delay_auth_decision:
self.LOG.warn("Unable to find authentication token"
" in headers")
self.LOG.debug("Headers: %s", env)
raise InvalidUserToken('Unable to find token in headers')
def _reject_request(self, env, start_response):
"""Redirect client to auth server.
:param env: wsgi request environment
:param start_response: wsgi response callback
:returns HTTPUnauthorized http response
"""
headers = [('WWW-Authenticate', 'Keystone uri=\'%s\'' % self.auth_uri)]
resp = MiniResp('Authentication required', env, headers)
start_response('401 Unauthorized', resp.headers)
return resp.body
def get_admin_token(self):
"""Return admin token, possibly fetching a new one.
        If self.admin_token_expiry is set from fetching an admin token, check
        it for expiration, and request a new token if the existing token
is about to expire.
:return admin token id
:raise ServiceError when unable to retrieve token from keystone
"""
if self.admin_token_expiry:
if will_expire_soon(self.admin_token_expiry):
self.admin_token = None
if not self.admin_token:
(self.admin_token,
self.admin_token_expiry) = self._request_admin_token()
return self.admin_token
def _http_request(self, method, path, **kwargs):
"""HTTP request helper used to make unspecified content type requests.
:param method: http method
:param path: relative request url
:return (http response object, response body)
        :raise NetworkError when unable to communicate with keystone
"""
url = "%s/%s" % (self.request_uri, path.lstrip('/'))
kwargs.setdefault('timeout', self.http_connect_timeout)
if self.cert_file and self.key_file:
kwargs['cert'] = (self.cert_file, self.key_file)
elif self.cert_file or self.key_file:
self.LOG.warn('Cannot use only a cert or key file. '
'Please provide both. Ignoring.')
kwargs['verify'] = self.ssl_ca_file or True
if self.ssl_insecure:
kwargs['verify'] = False
RETRIES = self.http_request_max_retries
retry = 0
while True:
try:
response = requests.request(method, url, **kwargs)
break
except Exception as e:
if retry >= RETRIES:
self.LOG.error('HTTP connection exception: %s', e)
raise NetworkError('Unable to communicate with keystone')
# NOTE(vish): sleep 0.5, 1, 2
self.LOG.warn('Retrying on HTTP connection exception: %s' % e)
time.sleep(2.0 ** retry / 2)
retry += 1
return response
def _json_request(self, method, path, body=None, additional_headers=None):
"""HTTP request helper used to make json requests.
:param method: http method
:param path: relative request url
:param body: dict to encode to json as request body. Optional.
:param additional_headers: dict of additional headers to send with
http request. Optional.
:return (http response object, response body parsed as json)
        :raise NetworkError when unable to communicate with keystone
"""
kwargs = {
'headers': {
'Content-type': 'application/json',
'Accept': 'application/json',
},
}
if additional_headers:
kwargs['headers'].update(additional_headers)
if body:
kwargs['data'] = jsonutils.dumps(body)
path = self.auth_admin_prefix + path
response = self._http_request(method, path, **kwargs)
try:
data = jsonutils.loads(response.text)
except ValueError:
self.LOG.debug('Keystone did not return json-encoded body')
data = {}
return response, data
def _request_admin_token(self):
"""Retrieve new token as admin user from keystone.
:return token id upon success
        :raises ServiceError when unable to communicate with keystone
Irrespective of the auth version we are going to use for the
user token, for simplicity we always use a v2 admin token to
validate the user token.
"""
params = {
'auth': {
'passwordCredentials': {
'username': self.admin_user,
'password': self.admin_password,
},
'tenantName': self.admin_tenant_name,
}
}
response, data = self._json_request('POST',
'/v2.0/tokens',
body=params)
try:
token = data['access']['token']['id']
expiry = data['access']['token']['expires']
if not (token and expiry):
raise AssertionError('invalid token or expire')
datetime_expiry = timeutils.parse_isotime(expiry)
return (token, timeutils.normalize_time(datetime_expiry))
except (AssertionError, KeyError):
self.LOG.warn(
"Unexpected response from keystone service: %s", data)
raise ServiceError('invalid json response')
except (ValueError):
self.LOG.warn(
"Unable to parse expiration time from token: %s", data)
raise ServiceError('invalid json response')
def _validate_user_token(self, user_token, retry=True):
"""Authenticate user using PKI
:param user_token: user's token id
        :param retry: Ignored, as it is no longer relevant
        :return decrypted body of the token if the token is valid
:raise InvalidUserToken if token is rejected
:no longer raises ServiceError since it no longer makes RPC
"""
token_id = None
try:
token_id = cms.cms_hash_token(user_token)
cached = self._cache_get(token_id)
if cached:
return cached
if cms.is_ans1_token(user_token):
verified = self.verify_signed_token(user_token)
data = jsonutils.loads(verified)
else:
data = self.verify_uuid_token(user_token, retry)
expires = self._confirm_token_not_expired(data)
self._cache_put(token_id, data, expires)
return data
except NetworkError:
self.LOG.debug('Token validation failure.', exc_info=True)
self.LOG.warn("Authorization failed for token %s", token_id)
raise InvalidUserToken('Token authorization failed')
except Exception:
self.LOG.debug('Token validation failure.', exc_info=True)
if token_id:
self._cache_store_invalid(token_id)
self.LOG.warn("Authorization failed for token %s", token_id)
raise InvalidUserToken('Token authorization failed')
def _token_is_v2(self, token_info):
return ('access' in token_info)
def _token_is_v3(self, token_info):
return ('token' in token_info)
def _build_user_headers(self, token_info):
"""Convert token object into headers.
Build headers that represent authenticated user - see main
doc info at start of file for details of headers to be defined.
:param token_info: token object returned by keystone on authentication
:raise InvalidUserToken when unable to parse token object
"""
def get_tenant_info():
"""Returns a (tenant_id, tenant_name) tuple from context."""
def essex():
"""Essex puts the tenant ID and name on the token."""
return (token['tenant']['id'], token['tenant']['name'])
def pre_diablo():
"""Pre-diablo, Keystone only provided tenantId."""
return (token['tenantId'], token['tenantId'])
def default_tenant():
"""Pre-grizzly, assume the user's default tenant."""
return (user['tenantId'], user['tenantName'])
for method in [essex, pre_diablo, default_tenant]:
try:
return method()
except KeyError:
pass
raise InvalidUserToken('Unable to determine tenancy.')
        # For clarity, set all those attributes that are optional in
# either a v2 or v3 token to None first
domain_id = None
domain_name = None
project_id = None
project_name = None
user_domain_id = None
user_domain_name = None
project_domain_id = None
project_domain_name = None
if self._token_is_v2(token_info):
user = token_info['access']['user']
token = token_info['access']['token']
roles = ','.join([role['name'] for role in user.get('roles', [])])
catalog_root = token_info['access']
catalog_key = 'serviceCatalog'
project_id, project_name = get_tenant_info()
else:
#v3 token
token = token_info['token']
user = token['user']
user_domain_id = user['domain']['id']
user_domain_name = user['domain']['name']
roles = (','.join([role['name']
for role in token.get('roles', [])]))
catalog_root = token
catalog_key = 'catalog'
# For v3, the server will put in the default project if there is
# one, so no need for us to add it here (like we do for a v2 token)
if 'domain' in token:
domain_id = token['domain']['id']
domain_name = token['domain']['name']
elif 'project' in token:
project_id = token['project']['id']
project_name = token['project']['name']
project_domain_id = token['project']['domain']['id']
project_domain_name = token['project']['domain']['name']
user_id = user['id']
user_name = user['name']
rval = {
'X-Identity-Status': 'Confirmed',
'X-Domain-Id': domain_id,
'X-Domain-Name': domain_name,
'X-Project-Id': project_id,
'X-Project-Name': project_name,
'X-Project-Domain-Id': project_domain_id,
'X-Project-Domain-Name': project_domain_name,
'X-User-Id': user_id,
'X-User-Name': user_name,
'X-User-Domain-Id': user_domain_id,
'X-User-Domain-Name': user_domain_name,
'X-Roles': roles,
# Deprecated
'X-User': user_name,
'X-Tenant-Id': project_id,
'X-Tenant-Name': project_name,
'X-Tenant': project_name,
'X-Role': roles,
}
self.LOG.debug("Received request from user: %s with project_id : %s"
" and roles: %s ", user_id, project_id, roles)
try:
catalog = catalog_root[catalog_key]
rval['X-Service-Catalog'] = jsonutils.dumps(catalog)
except KeyError:
pass
return rval
def _header_to_env_var(self, key):
"""Convert header to wsgi env variable.
:param key: http header name (ex. 'X-Auth-Token')
:return wsgi env variable name (ex. 'HTTP_X_AUTH_TOKEN')
"""
return 'HTTP_%s' % key.replace('-', '_').upper()
def _add_headers(self, env, headers):
"""Add http headers to environment."""
for (k, v) in six.iteritems(headers):
env_key = self._header_to_env_var(k)
env[env_key] = v
def _remove_headers(self, env, keys):
"""Remove http headers from environment."""
for k in keys:
env_key = self._header_to_env_var(k)
try:
del env[env_key]
except KeyError:
pass
def _get_header(self, env, key, default=None):
"""Get http header from environment."""
env_key = self._header_to_env_var(key)
return env.get(env_key, default)
def _cache_get(self, token_id, ignore_expires=False):
"""Return token information from cache.
        If the token is invalid, raise InvalidUserToken;
        return the token only if it is fresh (not expired).
"""
if self._cache and token_id:
if self._memcache_security_strategy is None:
key = CACHE_KEY_TEMPLATE % token_id
serialized = self._cache.get(key)
else:
keys = memcache_crypt.derive_keys(
token_id,
self._memcache_secret_key,
self._memcache_security_strategy)
cache_key = CACHE_KEY_TEMPLATE % (
memcache_crypt.get_cache_key(keys))
raw_cached = self._cache.get(cache_key)
try:
# unprotect_data will return None if raw_cached is None
serialized = memcache_crypt.unprotect_data(keys,
raw_cached)
except Exception:
msg = 'Failed to decrypt/verify cache data'
self.LOG.exception(msg)
# this should have the same effect as data not
# found in cache
serialized = None
if serialized is None:
return None
# Note that 'invalid' and (data, expires) are the only
# valid types of serialized cache entries, so there is not
# a collision with jsonutils.loads(serialized) == None.
cached = jsonutils.loads(serialized)
if cached == 'invalid':
self.LOG.debug('Cached Token %s is marked unauthorized',
token_id)
raise InvalidUserToken('Token authorization failed')
data, expires = cached
if ignore_expires or time.time() < float(expires):
self.LOG.debug('Returning cached token %s', token_id)
return data
else:
self.LOG.debug('Cached Token %s seems expired', token_id)
def _cache_store(self, token_id, data):
"""Store value into memcache.
data may be the string 'invalid' or a tuple like (data, expires)
"""
serialized_data = jsonutils.dumps(data)
if self._memcache_security_strategy is None:
cache_key = CACHE_KEY_TEMPLATE % token_id
data_to_store = serialized_data
else:
keys = memcache_crypt.derive_keys(
token_id,
self._memcache_secret_key,
self._memcache_security_strategy)
cache_key = CACHE_KEY_TEMPLATE % memcache_crypt.get_cache_key(keys)
data_to_store = memcache_crypt.protect_data(keys, serialized_data)
        # Historically the swift cache connection used the argument
        # timeout= for the cache timeout, but this has been unified
        # with the official python memcache client to use time= since
        # grizzly; we still need to handle folsom for a while until
        # this can be removed.
try:
self._cache.set(cache_key,
data_to_store,
time=self.token_cache_time)
except(TypeError):
self._cache.set(cache_key,
data_to_store,
timeout=self.token_cache_time)
def _confirm_token_not_expired(self, data):
if not data:
raise InvalidUserToken('Token authorization failed')
if self._token_is_v2(data):
timestamp = data['access']['token']['expires']
elif self._token_is_v3(data):
timestamp = data['token']['expires_at']
else:
raise InvalidUserToken('Token authorization failed')
expires = timeutils.parse_isotime(timestamp).strftime('%s')
if time.time() >= float(expires):
            self.LOG.debug('Token expired at %s', timestamp)
raise InvalidUserToken('Token authorization failed')
return expires
def _cache_put(self, token_id, data, expires):
"""Put token data into the cache.
Stores the parsed expire date in cache allowing
quick check of token freshness on retrieval.
"""
if self._cache:
self.LOG.debug('Storing %s token in memcache', token_id)
self._cache_store(token_id, (data, expires))
def _cache_store_invalid(self, token_id):
"""Store invalid token in cache."""
if self._cache:
self.LOG.debug(
'Marking token %s as unauthorized in memcache', token_id)
self._cache_store(token_id, 'invalid')
def cert_file_missing(self, proc_output, file_name):
return (file_name in proc_output and not os.path.exists(file_name))
def verify_uuid_token(self, user_token, retry=True):
"""Authenticate user token with keystone.
:param user_token: user's token id
:param retry: flag that forces the middleware to retry
user authentication when an indeterminate
response is received. Optional.
:return token object received from keystone on success
:raise InvalidUserToken if token is rejected
:raise ServiceError if unable to authenticate token
"""
# Determine the highest api version we can use.
if not self.auth_version:
self.auth_version = self._choose_api_version()
if self.auth_version == 'v3.0':
headers = {'X-Auth-Token': self.get_admin_token(),
'X-Subject-Token': safe_quote(user_token)}
response, data = self._json_request(
'GET',
'/v3/auth/tokens',
additional_headers=headers)
else:
headers = {'X-Auth-Token': self.get_admin_token()}
response, data = self._json_request(
'GET',
'/v2.0/tokens/%s' % safe_quote(user_token),
additional_headers=headers)
if response.status_code == 200:
return data
if response.status_code == 404:
self.LOG.warn("Authorization failed for token %s", user_token)
raise InvalidUserToken('Token authorization failed')
if response.status_code == 401:
self.LOG.info(
'Keystone rejected admin token %s, resetting', headers)
self.admin_token = None
else:
self.LOG.error('Bad response code while validating token: %s' %
response.status_code)
if retry:
self.LOG.info('Retrying validation')
return self._validate_user_token(user_token, False)
else:
self.LOG.warn("Invalid user token: %s. Keystone response: %s.",
user_token, data)
raise InvalidUserToken()
def is_signed_token_revoked(self, signed_text):
"""Indicate whether the token appears in the revocation list."""
revocation_list = self.token_revocation_list
revoked_tokens = revocation_list.get('revoked', [])
if not revoked_tokens:
return
revoked_ids = (x['id'] for x in revoked_tokens)
token_id = utils.hash_signed_token(signed_text)
for revoked_id in revoked_ids:
if token_id == revoked_id:
self.LOG.debug('Token %s is marked as having been revoked',
token_id)
return True
return False
def cms_verify(self, data):
"""Verifies the signature of the provided data's IAW CMS syntax.
If either of the certificate files are missing, fetch them and
retry.
"""
while True:
try:
output = cms.cms_verify(data, self.signing_cert_file_name,
self.signing_ca_file_name)
except cms.subprocess.CalledProcessError as err:
if self.cert_file_missing(err.output,
self.signing_cert_file_name):
self.fetch_signing_cert()
continue
if self.cert_file_missing(err.output,
self.signing_ca_file_name):
self.fetch_ca_cert()
continue
self.LOG.warning('Verify error: %s' % err)
raise err
return output
def verify_signed_token(self, signed_text):
"""Check that the token is unrevoked and has a valid signature."""
if self.is_signed_token_revoked(signed_text):
raise InvalidUserToken('Token has been revoked')
formatted = cms.token_to_cms(signed_text)
return self.cms_verify(formatted)
def verify_signing_dir(self):
if os.path.exists(self.signing_dirname):
if not os.access(self.signing_dirname, os.W_OK):
raise ConfigurationError(
'unable to access signing_dir %s' % self.signing_dirname)
if os.stat(self.signing_dirname).st_uid != os.getuid():
self.LOG.warning(
'signing_dir is not owned by %s' % os.getuid())
current_mode = stat.S_IMODE(os.stat(self.signing_dirname).st_mode)
if current_mode != stat.S_IRWXU:
self.LOG.warning(
'signing_dir mode is %s instead of %s' %
(oct(current_mode), oct(stat.S_IRWXU)))
else:
os.makedirs(self.signing_dirname, stat.S_IRWXU)
@property
def token_revocation_list_fetched_time(self):
if not self._token_revocation_list_fetched_time:
# If the fetched list has been written to disk, use its
# modification time.
if os.path.exists(self.revoked_file_name):
mtime = os.path.getmtime(self.revoked_file_name)
fetched_time = datetime.datetime.fromtimestamp(mtime)
# Otherwise the list will need to be fetched.
else:
fetched_time = datetime.datetime.min
self._token_revocation_list_fetched_time = fetched_time
return self._token_revocation_list_fetched_time
@token_revocation_list_fetched_time.setter
def token_revocation_list_fetched_time(self, value):
self._token_revocation_list_fetched_time = value
@property
def token_revocation_list(self):
timeout = (self.token_revocation_list_fetched_time +
self.token_revocation_list_cache_timeout)
list_is_current = timeutils.utcnow() < timeout
if list_is_current:
# Load the list from disk if required
if not self._token_revocation_list:
with open(self.revoked_file_name, 'r') as f:
self._token_revocation_list = jsonutils.loads(f.read())
else:
self.token_revocation_list = self.fetch_revocation_list()
return self._token_revocation_list
@token_revocation_list.setter
def token_revocation_list(self, value):
"""Save a revocation list to memory and to disk.
:param value: A json-encoded revocation list
"""
self._token_revocation_list = jsonutils.loads(value)
self.token_revocation_list_fetched_time = timeutils.utcnow()
with open(self.revoked_file_name, 'w') as f:
f.write(value)
def fetch_revocation_list(self, retry=True):
headers = {'X-Auth-Token': self.get_admin_token()}
response, data = self._json_request('GET', '/v2.0/tokens/revoked',
additional_headers=headers)
if response.status_code == 401:
if retry:
self.LOG.info(
'Keystone rejected admin token %s, resetting admin token',
headers)
self.admin_token = None
return self.fetch_revocation_list(retry=False)
if response.status_code != 200:
raise ServiceError('Unable to fetch token revocation list.')
if 'signed' not in data:
raise ServiceError('Revocation list improperly formatted.')
return self.cms_verify(data['signed'])
def fetch_signing_cert(self):
path = self.auth_admin_prefix.rstrip('/')
path += '/v2.0/certificates/signing'
response = self._http_request('GET', path)
def write_cert_file(data):
with open(self.signing_cert_file_name, 'w') as certfile:
certfile.write(data)
try:
#todo check response
try:
write_cert_file(response.text)
except IOError:
self.verify_signing_dir()
write_cert_file(response.text)
except (AssertionError, KeyError):
self.LOG.warn(
"Unexpected response from keystone service: %s", response.text)
raise ServiceError('invalid json response')
def fetch_ca_cert(self):
path = self.auth_admin_prefix.rstrip('/') + '/v2.0/certificates/ca'
response = self._http_request('GET', path)
try:
#todo check response
with open(self.signing_ca_file_name, 'w') as certfile:
certfile.write(response.text)
except (AssertionError, KeyError):
self.LOG.warn(
"Unexpected response from keystone service: %s", response.text)
raise ServiceError('invalid json response')
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return AuthProtocol(app, conf)
return auth_filter
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return AuthProtocol(None, conf)
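# Illustrative paste.deploy wiring for the filter_factory above; values are
# placeholders, not recommended defaults. Option names correspond to the opts
# registered under the [keystone_authtoken] group.
#
#     [filter:authtoken]
#     paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
#     auth_host = 127.0.0.1
#     auth_port = 35357
#     auth_protocol = http
#     admin_tenant_name = service
#     admin_user = admin
#     admin_password = secret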
|
citrix-openstack-build/python-keystoneclient
|
keystoneclient/middleware/auth_token.py
|
Python
|
apache-2.0
| 50,380 | 0.000099 |
import re
from django.core.management import call_command
from django.test import TestCase
from class_fixtures.tests.models import (Band, Musician,
Membership, Roadie, Competency, JobPosting, ComprehensiveModel)
from class_fixtures.utils import string_stdout
class DumpDataTests(TestCase):
def test_encoding_declaration(self):
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class')
self.assertTrue(output.getvalue().startswith('# -*- coding: utf-8 -*-\n'))
def test_correct_imports_in_output(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
fixture_import, model_imports = lines[3], lines[4]
self.assertEqual(fixture_import, "from class_fixtures.models import Fixture")
self.assertEqual(model_imports, "from tests.models import Band, Membership, Musician, Roadie")
def test_correct_fixtures_in_output(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[6], 'tests_band_fixture = Fixture(Band)')
self.assertEqual(lines[7], 'tests_musician_fixture = Fixture(Musician)')
self.assertEqual(lines[8], 'tests_membership_fixture = Fixture(Membership)')
self.assertEqual(lines[9], 'tests_roadie_fixture = Fixture(Roadie)')
def test_correct_fixture_populating(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[11], "tests_band_fixture.add(1, **{'name': u'Brutallica'})")
self.assertEqual(lines[12], "tests_musician_fixture.add(1, **{'name': u'Lars Toorich'})")
self.assertEqual(lines[13], "tests_membership_fixture.add(1, **{'band': 1, 'date_joined': datetime.date(1982, 1, 1), 'instrument': u'Bongos', 'musician': 1})")
self.assertEqual(lines[14], "tests_roadie_fixture.add(1, **{'hauls_for': [1], 'name': u'Ciggy Tardust'})")
def test_escaped_characters_in_strings(self):
band = Band.objects.create(name="The Apostrophe's Apostles")
musician = Musician.objects.create(name="Ivan \"The Terrible\" Terrible")
musician2 = Musician.objects.create(name="\\, aka the artist formerly known as Backslash")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bass", date_joined="2000-12-05")
membership = Membership.objects.create(band=band, musician=musician2, instrument="Guitar", date_joined="2000-12-05")
roadie = Roadie.objects.create(name='Simon \'Single Quote\' DeForestation')
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[11], """tests_band_fixture.add(1, **{'name': u"The Apostrophe's Apostles"})""")
self.assertEqual(lines[12], """tests_musician_fixture.add(1, **{'name': u'Ivan "The Terrible" Terrible'})""")
# Raw string to represent what's actually printed out, would be four backslashes without it
self.assertEqual(lines[13], r"""tests_musician_fixture.add(2, **{'name': u'\\, aka the artist formerly known as Backslash'})""")
self.assertEqual(lines[14], """tests_membership_fixture.add(1, **{'band': 1, 'date_joined': datetime.date(2000, 12, 5), 'instrument': u'Bass', 'musician': 1})""")
self.assertEqual(lines[15], """tests_membership_fixture.add(2, **{'band': 1, 'date_joined': datetime.date(2000, 12, 5), 'instrument': u'Guitar', 'musician': 2})""")
self.assertEqual(lines[16], """tests_roadie_fixture.add(1, **{'hauls_for': [1], 'name': u"Simon 'Single Quote' DeForestation"})""")
def test_complex_model(self):
import datetime
from decimal import Decimal
# https://docs.djangoproject.com/en/dev/ref/models/fields/#bigintegerfield
bigintfield_max = 9223372036854775807
cm = ComprehensiveModel.objects.create(
bigint = bigintfield_max,
boolean = True,
char = 'Hey hey now',
date = datetime.date(2011, 6, 6),
datetime = datetime.datetime(2011, 5, 5, 12, 30, 7),
decimal = Decimal('1234.56'),
floatf = 2345.67,
integer = 345678,
nullboolean = None,
time = datetime.time(14, 45, 30),
text = "Bacon ipsum dolor sit amet ham eiusmod cupidatat, "
"hamburger voluptate non dolor. Pork belly excepteur chuck, shankle ullamco "
"fugiat meatloaf est quis meatball sint dolore. Shank drumstick sint, tri-tip "
"deserunt proident in. Pancetta laboris culpa beef, pork chop venison magna "
"duis tail. Nulla in sirloin, minim bresaola ham cupidatat drumstick spare ribs "
"eiusmod ut. Shankle mollit ut, short ribs pork chop drumstick meatloaf duis "
"""elit reprehenderit. Cillum short loin flank est beef.
And the second paragraph looks like this.""")
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[4], "from tests.models import ComprehensiveModel")
self.assertEqual(lines[6], "tests_comprehensivemodel_fixture = Fixture(ComprehensiveModel)")
# Write-only code that turns the dumpdata output into a dictionary of
# keys and values to be tested individually
model_fields = dict([(j[0].strip("'"), j[1].strip(" ")) for j in
[i.split(':') for i in re.split(", '|\{|\}\)", lines[8]) if ':' in i]
])
# Depending on the platform where the test is being run, bigintfield_max
# may be an integer or a long, depending on the value of sys.maxint.
# The repr() result on the field will vary accordingly (L suffix or not),
# so assertEqual instead repr()'s the value (like the serializer does)
# because we can't have a single string representation for the value
# that would work across all platforms.
#
# There's one more complication: on some systems, Python sees the
# bigintfield_max value as an integer, but after it comes back from the
# database, it is transformed into a long, presumably due to the SQLite
# configuration. So, we retrieve the object from the database and repr()
# its bigint field instead of the original value.
db_cm = ComprehensiveModel.objects.get(pk=cm.pk)
self.assertEqual(model_fields['bigint'], repr(db_cm.bigint))
self.assertEqual(model_fields['boolean'], 'True')
self.assertEqual(model_fields['char'], "u'Hey hey now'")
self.assertEqual(model_fields['date'], 'datetime.date(2011, 6, 6)')
self.assertEqual(model_fields['datetime'], 'datetime.datetime(2011, 5, 5, 12, 30, 7)')
self.assertEqual(model_fields['decimal'], "Decimal('1234.56')")
# Float representations are tricky prior to Python 2.7, hopefully this
# is a good enough test for correctness of repr(2345.67) there
self.assertRegexpMatches(model_fields['floatf'], '^2345.6(70*|69*)\d?$')
self.assertEqual(model_fields['integer'], '345678')
self.assertEqual(model_fields['nullboolean'], 'None')
self.assertEqual(model_fields['time'], 'datetime.time(14, 45, 30)')
self.assertEqual(model_fields['text'], "u'Bacon ipsum dolor sit amet ham eiusmod cupidatat, "
"hamburger voluptate non dolor. Pork belly excepteur chuck, shankle ullamco fugiat meatloaf "
"est quis meatball sint dolore. Shank drumstick sint, tri-tip deserunt proident in. Pancetta "
"laboris culpa beef, pork chop venison magna duis tail. Nulla in sirloin, minim bresaola ham "
"cupidatat drumstick spare ribs eiusmod ut. Shankle mollit ut, short ribs pork chop drumstick "
"meatloaf duis elit reprehenderit. Cillum short loin flank est beef.\\n\\n"
"And the second paragraph looks like this.'")
def test_natural_key_output(self):
rails_n00b = Competency.objects.create(framework='Ruby on Rails', level=1)
cake_adept = Competency.objects.create(framework='CakePHP', level=2)
spring_master = Competency.objects.create(framework='Spring', level=3)
django_guru = Competency.objects.create(framework='Django', level=4)
rails_job = JobPosting.objects.create(title='Rails Intern', main_competency=rails_n00b)
django_job = JobPosting.objects.create(title='Elder Django Deity', main_competency=django_guru)
misc_job = JobPosting.objects.create(title='A man of many talents', main_competency=spring_master)
django_job.additional_competencies.add(rails_n00b)
misc_job.additional_competencies.add(cake_adept, rails_n00b)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[4], "from tests.models import Competency, JobPosting")
self.assertEqual(lines[6], "tests_competency_fixture = Fixture(Competency)")
self.assertEqual(lines[7], "tests_jobposting_fixture = Fixture(JobPosting)")
        # Django 1.4+ returns these lines in order, but Django 1.3 doesn't, so just check
        # that they're present rather than asserting their order
self.assertTrue("tests_competency_fixture.add(1, **{'framework': u'Ruby on Rails', 'level': 1})" in lines)
self.assertTrue("tests_competency_fixture.add(2, **{'framework': u'CakePHP', 'level': 2})" in lines)
self.assertTrue("tests_competency_fixture.add(3, **{'framework': u'Spring', 'level': 3})" in lines)
self.assertTrue("tests_competency_fixture.add(4, **{'framework': u'Django', 'level': 4})" in lines)
self.assertEqual(lines[13], "tests_jobposting_fixture.add(1, **{'additional_competencies': [], 'main_competency': 1, 'title': u'Rails Intern'})")
self.assertEqual(lines[14], "tests_jobposting_fixture.add(2, **{'additional_competencies': [1], 'main_competency': 4, 'title': u'Elder Django Deity'})")
self.assertEqual(lines[15], "tests_jobposting_fixture.add(3, **{'additional_competencies': [1, 2], 'main_competency': 3, 'title': u'A man of many talents'})")
|
jklaiho/django-class-fixtures
|
class_fixtures/tests/tests_dumpdata.py
|
Python
|
bsd-3-clause
| 11,960 | 0.008027 |
## Absolute location where all raw files are
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/rna-seq/Penalva_L_08182016/human'
## Output directory
OUT_DIR = '/staging/as/skchoudh/Oct_10_2016_HuR_Human_Mouse_Liver/RNA-Seq_human'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment i.e stranded/non-stranded
## and hence is not required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
# We don't have these, so just use the CDS bed to get the pipeline running
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of python2 environment
## The following package needs to be installed in that environment
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 python=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
|
saketkc/ribo-seq-snakemake
|
configs/Oct_10_2016_HuR_Human_rna.py
|
Python
|
bsd-3-clause
| 2,343 | 0.012804 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: suite.py
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda : None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return '<%s tests=%s>' % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
if not hasattr(test, '__call__'):
raise TypeError('{} is not callable'.format(repr(test)))
if isinstance(test, type) and issubclass(test, (
case.TestCase, TestSuite)):
raise TypeError('TestCases and TestSuites must be instantiated before passing them to addTest()')
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError('tests must be an iterable of tests, not a string')
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False):
continue
if not debug:
test(result)
else:
test.debug()
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if result._moduleSetUpFailed:
return
if getattr(currentClass, '__unittest_skip__', False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
else:
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
return
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
else:
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, '__unittest_skip__', False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
tearDownClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return '<ErrorHolder description=%r>' % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"""A crude way to tell apart testcases and suites with duck-typing"""
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"""Used by the TestSuite to hold previous class when running in debug."""
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/unittest/suite.py
|
Python
|
unlicense
| 10,084 | 0.00119 |
"""
This module is deprecated -- use scipy.linalg.blas instead
"""
from __future__ import division, print_function, absolute_import
try:
from ._cblas import *
except ImportError:
empty_module = True
import numpy as _np
@_np.deprecate(old_name="scipy.linalg.cblas", new_name="scipy.linalg.blas")
def _deprecate():
pass
_deprecate()
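# Illustrative migration note (editor's addition): callers should import the
# replacement module directly, e.g.
#   from scipy.linalg import blas
#   C = blas.dgemm(alpha=1.0, a=A, b=B)  # A, B are hypothetical ndarrays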
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/linalg/cblas.py
|
Python
|
gpl-3.0
| 362 | 0.002762 |