| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
"""Provides Django-Admin form field."""
# coding=utf-8
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import Field, ValidationError
from tempo.django.widgets import RecurrentEventSetWidget
from tempo.recurrenteventset import RecurrentEventSet
class RecurrentEventSetField(Field):
"""Form field, for usage in admin forms.
Represents RecurrentEventSet."""
# pylint: disable=no-init
widget = RecurrentEventSetWidget
def clean(self, value):
"""Cleans and validates RecurrentEventSet expression."""
# pylint: disable=no-self-use
if value is None:
return None
if not RecurrentEventSet.validate_json(value):
raise ValidationError(_('Invalid input.'),
code='invalid')
return RecurrentEventSet.from_json(value)
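# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a JSON expression accepted by RecurrentEventSet.validate_json, the
# field can be exercised directly; the expression below is a placeholder, not
# a documented python-tempo example:
#
#     field = RecurrentEventSetField()
#     recurrence = field.clean('[...]')  # returns a RecurrentEventSet, or raises ValidationError
#     field.clean(None)                  # returns None, per clean() above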
| AndrewPashkin/python-tempo | src/tempo/django/forms.py | Python | bsd-3-clause | 859 | 0 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import pytest
import requests
import requests.exceptions
from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, MOCK, TEST_IMAGE
from tests.util import uuid_value
from osbs.utils import ImageName
from atomic_reactor.core import ContainerTasker
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from atomic_reactor.inner import DockerBuildWorkflow
from tests.constants import MOCK_SOURCE
if MOCK:
from tests.docker_mock import mock_docker
@pytest.fixture()
def temp_image_name():
return ImageName(repo=("atomic-reactor-tests-%s" % uuid_value()))
@pytest.fixture()
def is_registry_running():
"""
is docker registry running (at {docker0,lo}:5000)?
"""
try:
lo_response = requests.get(LOCALHOST_REGISTRY_HTTP)
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
try:
d0_response = requests.get(DOCKER0_REGISTRY_HTTP) # leap of faith
except requests.exceptions.ConnectionError:
return False
if not d0_response.ok:
return False
return True
@pytest.fixture(scope="module")
def docker_tasker():
if MOCK:
mock_docker()
ct = ContainerTasker(retry_times=0)
ct.build_method = CONTAINER_DOCKERPY_BUILD_METHOD
return ct
@pytest.fixture(params=[True, False])
def reactor_config_map(request):
return request.param
@pytest.fixture(params=[True, False])
def inspect_only(request):
return request.param
@pytest.fixture
def user_params(monkeypatch):
"""
Set a default image_tag in the USER_PARAMS environment variable. Any test that
creates an instance of :class:`DockerBuildWorkflow` requires this fixture.
"""
monkeypatch.setenv('USER_PARAMS', json.dumps({'image_tag': TEST_IMAGE}))
@pytest.fixture
def workflow(user_params):
return DockerBuildWorkflow(source=MOCK_SOURCE)
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
if report.passed or report.skipped:
del cells[:]
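# --- Illustrative sketch (not part of the original conftest) ---
# A test in this suite consumes the fixtures above simply by naming them as
# arguments, for example:
#
#     def test_workflow_builds(workflow, docker_tasker):
#         assert workflow is not None
#         assert docker_tasker.build_method == CONTAINER_DOCKERPY_BUILD_METHOD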
| projectatomic/atomic-reactor | tests/conftest.py | Python | bsd-3-clause | 2,306 | 0.000867 |
from sympy import (Abs, C, Dummy, Max, Min, Rational, Float, S, Symbol, cos, oo,
pi, simplify, sqrt, symbols)
from sympy.geometry import (Circle, Curve, Ellipse, GeometryError, Line, Point,
Polygon, Ray, RegularPolygon, Segment, Triangle,
are_similar, convex_hull, intersection)
from sympy.utilities.pytest import raises, XFAIL
x = Symbol('x', real=True)
y = Symbol('y', real=True)
t = Symbol('t', real=True)
x1 = Symbol('x1', real=True)
x2 = Symbol('x2', real=True)
y1 = Symbol('y1', real=True)
y2 = Symbol('y2', real=True)
half = Rational(1,2)
def feq(a, b):
"""Test if two floating point values are 'equal'."""
t = Float("1.0E-10")
return -t < a-b < t
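# Example (illustrative): feq(sqrt(2).evalf(), Float("1.41421356237")) is True
# because the difference is far below the 1e-10 tolerance used above.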
def test_curve():
s = Symbol('s')
z = Symbol('z')
# this curve is independent of the indicated parameter
C = Curve([2*s, s**2], (z, 0, 2))
assert C.parameter == z
assert C.functions == (2*s, s**2)
assert C.arbitrary_point() == Point(2*s, s**2)
assert C.arbitrary_point(z) == Point(2*s, s**2)
# this is how it is normally used
C = Curve([2*s, s**2], (s, 0, 2))
assert C.parameter == s
assert C.functions == (2*s, s**2)
t = Symbol('t')
assert C.arbitrary_point() != Point(2*t, t**2) # the returned t has different assumptions
t = Symbol('t', real=True) # now t has the same assumptions so the test passes
assert C.arbitrary_point() == Point(2*t, t**2)
assert C.arbitrary_point(z) == Point(2*z, z**2)
assert C.arbitrary_point(C.parameter) == Point(2*s, s**2)
raises(ValueError, 'Curve((s, s + t), (s, 1, 2)).arbitrary_point()')
raises(ValueError, 'Curve((s, s + t), (t, 1, 2)).arbitrary_point(s)')
def test_point():
p1 = Point(x1, x2)
p2 = Point(y1, y2)
p3 = Point(0, 0)
p4 = Point(1, 1)
assert len(p1) == 1
assert p1 in p1
assert p1 not in p2
assert p2[1] == y2
assert (p3+p4) == p4
assert (p2-p1) == Point(y1-x1, y2-x2)
assert p4*5 == Point(5, 5)
assert -p2 == Point(-y1, -y2)
assert Point.midpoint(p3, p4) == Point(half, half)
assert Point.midpoint(p1, p4) == Point(half + half*x1, half + half*x2)
assert Point.midpoint(p2, p2) == p2
assert p2.midpoint(p2) == p2
assert Point.distance(p3, p4) == sqrt(2)
assert Point.distance(p1, p1) == 0
assert Point.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2)
p1_1 = Point(x1, x1)
p1_2 = Point(y2, y2)
p1_3 = Point(x1 + 1, x1)
assert Point.is_collinear(p3)
assert Point.is_collinear(p3, p4)
assert Point.is_collinear(p3, p4, p1_1, p1_2)
assert Point.is_collinear(p3, p4, p1_1, p1_3) == False
x_pos = Symbol('x', real=True, positive=True)
p2_1 = Point(x_pos, 0)
p2_2 = Point(0, x_pos)
p2_3 = Point(-x_pos, 0)
p2_4 = Point(0, -x_pos)
p2_5 = Point(x_pos, 5)
assert Point.is_concyclic(p2_1)
assert Point.is_concyclic(p2_1, p2_2)
assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_4)
assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_5) == False
def test_line():
p1 = Point(0, 0)
p2 = Point(1, 1)
p3 = Point(x1, x1)
p4 = Point(y1, y1)
p5 = Point(x1, 1 + x1)
p6 = Point(1, 0)
p7 = Point(0, 1)
p8 = Point(2, 0)
p9 = Point(2, 1)
l1 = Line(p1, p2)
l2 = Line(p3, p4)
l3 = Line(p3, p5)
l4 = Line(p1, p6)
l5 = Line(p1, p7)
l6 = Line(p8, p9)
l7 = Line(p2, p9)
# Basic stuff
assert Line((1, 1), slope=1) == Line((1, 1), (2, 2))
assert Line((1, 1), slope=oo) == Line((1, 1), (1, 2))
assert Line((1, 1), slope=-oo) == Line((1, 1), (1, 2))
raises(ValueError, 'Line((1, 1), 1)')
assert Line(p1, p2) == Line(p2, p1)
assert l1 == l2
assert l1 != l3
assert l1.slope == 1
assert l3.slope == oo
assert l4.slope == 0
assert l4.coefficients == (0, 1, 0)
assert l4.equation(x=x, y=y) == y
assert l5.slope == oo
assert l5.coefficients == (1, 0, 0)
assert l5.equation() == x
assert l6.equation() == x - 2
assert l7.equation() == y - 1
assert p1 in l1 # is p1 on the line l1?
assert p1 not in l3
assert simplify(l1.equation()) in (x-y, y-x)
assert simplify(l3.equation()) in (x-x1, x1-x)
assert l2.arbitrary_point() in l2
for ind in xrange(0, 5):
assert l3.random_point() in l3
# Orthogonality
p1_1 = Point(-x1, x1)
l1_1 = Line(p1, p1_1)
assert l1.perpendicular_line(p1) == l1_1
assert Line.is_perpendicular(l1, l1_1)
assert Line.is_perpendicular(l1 , l2) == False
# Parallelity
p2_1 = Point(-2*x1, 0)
l2_1 = Line(p3, p5)
assert l2.parallel_line(p1_1) == Line(p2_1, p1_1)
assert l2_1.parallel_line(p1) == Line(p1, Point(0, 2))
assert Line.is_parallel(l1, l2)
assert Line.is_parallel(l2, l3) == False
assert Line.is_parallel(l2, l2.parallel_line(p1_1))
assert Line.is_parallel(l2_1, l2_1.parallel_line(p1))
# Intersection
assert intersection(l1, p1) == [p1]
assert intersection(l1, p5) == []
assert intersection(l1, l2) in [[l1], [l2]]
assert intersection(l1, l1.parallel_line(p5)) == []
# Concurrency
l3_1 = Line(Point(5, x1), Point(-Rational(3,5), x1))
assert Line.is_concurrent(l1, l3)
assert Line.is_concurrent(l1, l3, l3_1)
assert Line.is_concurrent(l1, l1_1, l3) == False
# Projection
assert l2.projection(p4) == p4
assert l1.projection(p1_1) == p1
assert l3.projection(p2) == Point(x1, 1)
# Finding angles
l1_1 = Line(p1, Point(5, 0))
assert feq(Line.angle_between(l1, l1_1).evalf(), pi.evalf()/4)
# Testing Rays and Segments (very similar to Lines)
assert Ray((1, 1), angle=pi/4) == Ray((1, 1), (2, 2))
assert Ray((1, 1), angle=pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=-pi/2) == Ray((1, 1), (1, 0))
assert Ray((1, 1), angle=-3*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5.0*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=3.0*pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=4.0*pi) == Ray((1, 1), (2, 1))
assert Ray((1, 1), angle=0) == Ray((1, 1), (2, 1))
# XXX don't know why this fails without str
assert str(Ray((1, 1), angle=4.2*pi)) == str(Ray(Point(1, 1), Point(2, 1 + C.tan(0.2*pi))))
assert Ray((1, 1), angle=5) == Ray((1, 1), (2, 1 + C.tan(5)))
raises(ValueError, 'Ray((1, 1), 1)')
r1 = Ray(p1, Point(-1, 5))
r2 = Ray(p1, Point(-1, 1))
r3 = Ray(p3, p5)
assert l1.projection(r1) == Ray(p1, p2)
assert l1.projection(r2) == p1
assert r3 != r1
t = Symbol('t', real=True)
assert Ray((1, 1), angle=pi/4).arbitrary_point() == Point(1/(1 - t), 1/(1 - t))
s1 = Segment(p1, p2)
s2 = Segment(p1, p1_1)
assert s1.midpoint == Point(Rational(1,2), Rational(1,2))
assert s2.length == sqrt( 2*(x1**2) )
assert s1.perpendicular_bisector() == Line(Point(0, 1), Point(1, 0))
assert Segment((1, 1), (2, 3)).arbitrary_point() == Point(1 + t, 1 + 2*t)
# Segment contains
a, b = symbols('a,b')
s = Segment((0, a), (0, b))
assert Point(0, (a + b)/2) in s
s = Segment((a, 0), (b, 0))
assert Point((a + b)/2, 0) in s
assert (Point(2*a, 0) in s) is False # XXX should be None?
# Testing distance from a Segment to an object
s1 = Segment(Point(0, 0), Point(1, 1))
s2 = Segment(Point(half, half), Point(1, 0))
pt1 = Point(0, 0)
pt2 = Point(Rational(3)/2, Rational(3)/2)
assert s1.distance(pt1) == 0
assert s2.distance(pt1) == 2**(half)/2
assert s2.distance(pt2) == 2**(half)
# Special cases of projection and intersection
r1 = Ray(Point(1, 1), Point(2, 2))
r2 = Ray(Point(2, 2), Point(0, 0))
r3 = Ray(Point(1, 1), Point(-1, -1))
r4 = Ray(Point(0, 4), Point(-1, -5))
assert intersection(r1, r2) == [Segment(Point(1, 1), Point(2, 2))]
assert intersection(r1, r3) == [Point(1, 1)]
assert r1.projection(r3) == Point(1, 1)
assert r1.projection(r4) == Segment(Point(1, 1), Point(2, 2))
r5 = Ray(Point(0, 0), Point(0, 1))
r6 = Ray(Point(0, 0), Point(0, 2))
assert r5 in r6
assert r6 in r5
s1 = Segment(Point(0, 0), Point(2, 2))
s2 = Segment(Point(-1, 5), Point(-5, -10))
s3 = Segment(Point(0, 4), Point(-2, 2))
assert intersection(r1, s1) == [Segment(Point(1, 1), Point(2, 2))]
assert r1.projection(s2) == Segment(Point(1, 1), Point(2, 2))
assert s3.projection(r1) == Segment(Point(0, 4), Point(-1, 3))
l1 = Line(Point(0, 0), Point(3, 4))
r1 = Ray(Point(0, 0), Point(3, 4))
s1 = Segment(Point(0, 0), Point(3, 4))
assert intersection(l1, l1) == [l1]
assert intersection(l1, r1) == [r1]
assert intersection(l1, s1) == [s1]
assert intersection(r1, l1) == [r1]
assert intersection(s1, l1) == [s1]
entity1 = Segment(Point(-10,10), Point(10,10))
entity2 = Segment(Point(-5,-5), Point(-5,5))
assert intersection(entity1, entity2) == []
def test_ellipse():
p1 = Point(0, 0)
p2 = Point(1, 1)
p3 = Point(x1, x2)
p4 = Point(0, 1)
p5 = Point(-1, 0)
e1 = Ellipse(p1, 1, 1)
e2 = Ellipse(p2, half, 1)
e3 = Ellipse(p1, y1, y1)
c1 = Circle(p1, 1)
c2 = Circle(p2,1)
c3 = Circle(Point(sqrt(2),sqrt(2)),1)
# Test creation with three points
cen, rad = Point(3*half, 2), 5*half
assert Circle(Point(0,0), Point(3,0), Point(0,4)) == Circle(cen, rad)
raises(GeometryError, "Circle(Point(0,0), Point(1,1), Point(2,2))")
# Basic Stuff
assert e1 == c1
assert e1 != e2
assert p4 in e1
assert p2 not in e2
assert e1.area == pi
assert e2.area == pi/2
assert e3.area == pi*(y1**2)
assert c1.area == e1.area
assert c1.circumference == e1.circumference
assert e3.circumference == 2*pi*y1
# with generic symbols, the hradius is assumed to contain the major radius
M = Symbol('M')
m = Symbol('m')
c = Ellipse(p1, M, m).circumference
_x = c.atoms(Dummy).pop()
assert c == \
4*M*C.Integral(sqrt((1 - _x**2*(M**2 - m**2)/M**2)/(1 - _x**2)), (_x, 0, 1))
assert e2.arbitrary_point() in e2
# Foci
f1, f2 = Point(sqrt(12), 0), Point(-sqrt(12), 0)
ef = Ellipse(Point(0, 0), 4, 2)
assert ef.foci in [(f1, f2), (f2, f1)]
# Tangents
v = sqrt(2) / 2
p1_1 = Point(v, v)
p1_2 = p2 + Point(half, 0)
p1_3 = p2 + Point(0, 1)
assert e1.tangent_lines(p4) == c1.tangent_lines(p4)
assert e2.tangent_lines(p1_2) == [Line(p1_2, p2 + Point(half, 1))]
assert e2.tangent_lines(p1_3) == [Line(p1_3, p2 + Point(half, 1))]
assert c1.tangent_lines(p1_1) == [Line(p1_1, Point(0, sqrt(2)))]
assert e2.is_tangent(Line(p1_2, p2 + Point(half, 1)))
assert e2.is_tangent(Line(p1_3, p2 + Point(half, 1)))
assert c1.is_tangent(Line(p1_1, Point(0, sqrt(2))))
assert e1.is_tangent(Line(Point(0, 0), Point(1, 1))) == False
assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(0, 0)) == \
[Line(Point(0, 0), Point(S(77)/25, S(132)/25)),
Line(Point(0, 0), Point(S(33)/5, S(22)/5))]
assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(3, 4)) == \
[Line(Point(3, 4), Point(3, 5)), Line(Point(3, 4), Point(5, 4))]
assert Circle(Point(5, 5), 2).tangent_lines(Point(3, 3)) == \
[Line(Point(3, 3), Point(3, 5)), Line(Point(3, 3), Point(5, 3))]
assert Circle(Point(5, 5), 2).tangent_lines(Point(5 - 2*sqrt(2), 5)) == \
[Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 - sqrt(2))),
Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 + sqrt(2))),]
# Properties
major = 3
minor = 1
e4 = Ellipse(p2, minor, major)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
# independent of orientation
e4 = Ellipse(p2, major, minor)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
# Intersection
l1 = Line(Point(1, -5), Point(1, 5))
l2 = Line(Point(-5, -1), Point(5, -1))
l3 = Line(Point(-1, -1), Point(1, 1))
l4 = Line(Point(-10, 0), Point(0, 10))
pts_c1_l3 = [Point(sqrt(2)/2, sqrt(2)/2), Point(-sqrt(2)/2, -sqrt(2)/2)]
assert intersection(e2, l4) == []
assert intersection(c1, Point(1, 0)) == [Point(1, 0)]
assert intersection(c1, l1) == [Point(1, 0)]
assert intersection(c1, l2) == [Point(0, -1)]
assert intersection(c1, l3) in [pts_c1_l3, [pts_c1_l3[1], pts_c1_l3[0]]]
assert intersection(c1, c2) in [[(1,0), (0,1)],[(0,1),(1,0)]]
assert intersection(c1, c3) == [(sqrt(2)/2, sqrt(2)/2)]
# some special case intersections
csmall = Circle(p1, 3)
cbig = Circle(p1, 5)
cout = Circle(Point(5, 5), 1)
# one circle inside of another
assert csmall.intersection(cbig) == []
# separate circles
assert csmall.intersection(cout) == []
# coincident circles
assert csmall.intersection(csmall) == csmall
v = sqrt(2)
t1 = Triangle(Point(0, v), Point(0, -v), Point(v, 0))
points = intersection(t1, c1)
assert len(points) == 4
assert Point(0, 1) in points
assert Point(0, -1) in points
assert Point(v/2, v/2) in points
assert Point(v/2, -v/2) in points
circ = Circle(Point(0, 0), 5)
elip = Ellipse(Point(0, 0), 5, 20)
assert intersection(circ, elip) in \
[[Point(5, 0), Point(-5, 0)], [Point(-5, 0), Point(5, 0)]]
assert elip.tangent_lines(Point(0, 0)) == []
elip = Ellipse(Point(0, 0), 3, 2)
assert elip.tangent_lines(Point(3, 0)) == [Line(Point(3, 0), Point(3, -12))]
e1 = Ellipse(Point(0, 0), 5, 10)
e2 = Ellipse(Point(2, 1), 4, 8)
a = S(53)/17
c = 2*sqrt(3991)/17
assert e1.intersection(e2) == [Point(a - c/8, a/2 + c), Point(a + c/8, a/2 - c)]
# Combinations of above
assert e3.is_tangent(e3.tangent_lines(p1 + Point(y1, 0))[0])
e = Ellipse((1, 2), 3, 2)
assert e.tangent_lines(Point(10, 0)) == \
[Line(Point(10, 0), Point(1, 0)),
Line(Point(10, 0), Point(S(14)/5, S(18)/5))]
# encloses_point
e = Ellipse((0, 0), 1, 2)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(e.center + Point(e.hradius + Rational(1, 10), 0)) is False
e = Ellipse((0, 0), 2, 1)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(e.center + Point(e.hradius + Rational(1, 10), 0)) is False
def test_ellipse_random_point():
e3 = Ellipse(Point(0, 0), y1, y1)
rx, ry = Symbol('rx'), Symbol('ry')
for ind in xrange(0, 5):
r = e3.random_point()
# substitution should give zero*y1**2
assert e3.equation(rx, ry).subs(zip((rx, ry), r.args)
).n(3).as_coeff_Mul()[0] < 1e-10
def test_polygon():
t = Triangle(Point(0, 0), Point(2, 0), Point(3, 3))
assert Polygon(Point(0, 0), Point(1, 0), Point(2, 0), Point(3, 3)) == t
assert Polygon(Point(1, 0), Point(2, 0), Point(3, 3), Point(0, 0)) == t
assert Polygon(Point(2, 0), Point(3, 3), Point(0, 0), Point(1, 0)) == t
p1 = Polygon(
Point(0, 0), Point(3,-1),
Point(6, 0), Point(4, 5),
Point(2, 3), Point(0, 3))
p2 = Polygon(
Point(6, 0), Point(3,-1),
Point(0, 0), Point(0, 3),
Point(2, 3), Point(4, 5))
p3 = Polygon(
Point(0, 0), Point(3, 0),
Point(5, 2), Point(4, 4))
p4 = Polygon(
Point(0, 0), Point(4, 4),
Point(5, 2), Point(3, 0))
#
# General polygon
#
assert p1 == p2
assert len(p1) == 6
assert len(p1.sides) == 6
assert p1.perimeter == 5+2*sqrt(10)+sqrt(29)+sqrt(8)
assert p1.area == 22
assert not p1.is_convex()
assert p3.is_convex()
assert p4.is_convex() # ensure convex for both CW and CCW point specification
#
# Regular polygon
#
p1 = RegularPolygon(Point(0, 0), 10, 5)
p2 = RegularPolygon(Point(0, 0), 5, 5)
assert p1 != p2
assert p1.interior_angle == 3*pi/5
assert p1.exterior_angle == 2*pi/5
assert p2.apothem == 5*cos(pi/5)
assert p2.circumcircle == Circle(Point(0, 0), 5)
assert p2.incircle == Circle(Point(0, 0), p2.apothem)
assert p1.is_convex()
assert p1.rotation == 0
p1.spin(pi/3)
assert p1.rotation == pi/3
assert p1[0] == Point(5, 5*sqrt(3))
# while spin works in place (notice that rotation is 2pi/3 below)
# rotate returns a new object
p1_old = p1
assert p1.rotate(pi/3) == RegularPolygon(Point(0, 0), 10, 5, 2*pi/3)
assert p1 == p1_old
#
# Angles
#
angles = p4.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
angles = p3.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
#
# Triangle
#
p1 = Point(0, 0)
p2 = Point(5, 0)
p3 = Point(0, 5)
t1 = Triangle(p1, p2, p3)
t2 = Triangle(p1, p2, Point(Rational(5,2), sqrt(Rational(75,4))))
t3 = Triangle(p1, Point(x1, 0), Point(0, x1))
s1 = t1.sides
s2 = t2.sides
s3 = t3.sides
# Basic stuff
assert Triangle(p1, p1, p1) == p1
assert Triangle(p2, p2*2, p2*3) == Segment(p2, p2*3)
assert t1.area == Rational(25,2)
assert t1.is_right()
assert t2.is_right() == False
assert t3.is_right()
assert p1 in t1
assert t1.sides[0] in t1
assert Segment((0, 0), (1, 0)) in t1
assert Point(5, 5) not in t2
assert t1.is_convex()
assert feq(t1.angles[p1].evalf(), pi.evalf()/2)
assert t1.is_equilateral() == False
assert t2.is_equilateral()
assert t3.is_equilateral() == False
assert are_similar(t1, t2) == False
assert are_similar(t1, t3)
assert are_similar(t2, t3) == False
# Bisectors
bisectors = t1.bisectors()
assert bisectors[p1] == Segment(p1, Point(Rational(5,2), Rational(5,2)))
ic = (250 - 125*sqrt(2)) / 50
assert t1.incenter == Point(ic, ic)
# Inradius
assert t1.inradius == 5 - 5*sqrt(2)/2
assert t2.inradius == 5*sqrt(3)/6
assert t3.inradius == x1**2/((2 + sqrt(2))*Abs(x1))
# Medians + Centroid
m = t1.medians
assert t1.centroid == Point(Rational(5,3), Rational(5,3))
assert m[p1] == Segment(p1, Point(Rational(5,2), Rational(5,2)))
assert t3.medians[p1] == Segment(p1, Point(x1/2, x1/2))
assert intersection(m[p1], m[p2], m[p3]) == [t1.centroid]
# Perpendicular
altitudes = t1.altitudes
assert altitudes[p1] == Segment(p1, Point(Rational(5,2), Rational(5,2)))
assert altitudes[p2] == s1[0]
assert altitudes[p3] == s1[2]
# Ensure
assert len(intersection(*bisectors.values())) == 1
assert len(intersection(*altitudes.values())) == 1
assert len(intersection(*m.values())) == 1
# Distance
p1 = Polygon(
Point(0, 0), Point(1, 0),
Point(1, 1), Point(0, 1))
p2 = Polygon(
Point(0, Rational(5)/4), Point(1, Rational(5)/4),
Point(1, Rational(9)/4), Point(0, Rational(9)/4))
p3 = Polygon(
Point(1, 2), Point(2, 2),
Point(2, 1))
p4 = Polygon(
Point(1, 1), Point(Rational(6)/5, 1),
Point(1, Rational(6)/5))
p5 = Polygon(
Point(half, 3**(half)/2), Point(-half, 3**(half)/2),
Point(-1, 0), Point(-half, -(3)**(half)/2),
Point(half, -(3)**(half)/2), Point(1, 0))
p6 = Polygon(Point(2, Rational(3)/10), Point(Rational(17)/10, 0),
Point(2, -Rational(3)/10), Point(Rational(23)/10, 0))
pt1 = Point(half, half)
pt2 = Point(1, 1)
'''Polygon to Point'''
assert p1.distance(pt1) == half
assert p1.distance(pt2) == 0
assert p2.distance(pt1) == Rational(3)/4
assert p3.distance(pt2) == sqrt(2)/2
@XFAIL
def test_polygon_to_polygon():
'''Polygon to Polygon'''
# XXX: Because of the way the warnings filters work, this will fail if it's
# run more than once in the same session. See issue 2492.
import warnings
# p1.distance(p2) emits a warning
# First, test the warning
warnings.filterwarnings("error", "Polygons may intersect producing erroneous output")
raises(UserWarning, "p1.distance(p2)")
# now test the actual output
warnings.filterwarnings("ignore", "Polygons may intersect producing erroneous output")
assert p1.distance(p2) == half/2
# Keep testing reasonably thread safe, so reset the warning
warnings.filterwarnings("default", "Polygons may intersect producing erroneous output")
# Note, in Python 2.6+, this can be done more nicely using the
# warnings.catch_warnings context manager.
# See http://docs.python.org/library/warnings#testing-warnings.
assert p1.distance(p3) == sqrt(2)/2
assert p3.distance(p4) == (sqrt(2)/2 - sqrt(Rational(2)/25)/2)
assert p5.distance(p6) == Rational(7)/10
def test_convex_hull():
p = [Point(-5,-1), Point(-2,1), Point(-2,-1), Point(-1,-3), Point(0,0),
Point(1,1), Point(2,2), Point(2,-1), Point(3,1), Point(4,-1), Point(6,2)]
ch = Polygon(p[0], p[3], p[9], p[10], p[6], p[1])
#test handling of duplicate points
p.append(p[3])
#more than 3 collinear points
another_p = [Point(-45, -85), Point(-45, 85), Point(-45,26),Point(-45,-24)]
ch2 = Segment(another_p[0],another_p[1])
assert convex_hull(*another_p) == ch2
assert convex_hull(*p) == ch
assert convex_hull(p[0]) == p[0]
assert convex_hull(p[0], p[1]) == Segment(p[0], p[1])
# no unique points
assert convex_hull(*[p[-1]]*3) == p[-1]
# collection of items
assert convex_hull(*[Point(0,0),
Segment(Point(1, 0), Point(1, 1)),
RegularPolygon(Point(2, 0), 2, 4)]) == \
Polygon(Point(0, 0), Point(2, -2), Point(4, 0), Point(2, 2))
def test_concyclic_doctest_bug():
p1,p2 = Point(-1, 0), Point(1, 0)
p3,p4 = Point(0, 1), Point(-1, 2)
assert Point.is_concyclic(p1, p2, p3)
assert not Point.is_concyclic(p1, p2, p3, p4)
def test_subs():
p = Point(x, 2)
q = Point(1, 1)
r = Point(3, 4)
for o in [p,
Segment(p, q),
Ray(p, q),
Line(p, q),
Triangle(p, q, r),
RegularPolygon(p, 3, 6),
Polygon(p, q, r, Point(5,4)),
Circle(p, 3),
Ellipse(p, 3, 4)]:
assert 'y' in str(o.subs(x, y))
def test_encloses():
# square with a dimpled left side
s = Polygon(Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 1), Point(S.Half, S.Half))
# the following will be True if the polygon isn't treated as closing on itself
assert s.encloses(Point(0, S.Half)) is False
assert s.encloses(Point(S.Half, S.Half)) is False # it's a vertex
assert s.encloses(Point(Rational(3, 4), S.Half)) is True
def test_free_symbols():
a, b, c, d, e, f, s = symbols('a:f,s')
assert Point(a,b).free_symbols == set([a, b])
assert Line((a,b),(c,d)).free_symbols == set([a, b, c, d])
assert Ray((a,b),(c,d)).free_symbols == set([a, b, c, d])
assert Ray((a,b),angle=c).free_symbols == set([a, b, c])
assert Segment((a,b),(c,d)).free_symbols == set([a, b, c, d])
assert Line((a,b),slope=c).free_symbols == set([a, b, c])
assert Curve((a*s,b*s),(s,c,d)).free_symbols == set([a, b, c, d])
assert Ellipse((a,b),c,d).free_symbols == set([a, b, c, d])
assert Ellipse((a,b),c, eccentricity=d).free_symbols == set([a, b, c, d])
assert Ellipse((a,b),vradius=c, eccentricity=d).free_symbols == set([a, b, c, d])
assert Circle((a,b),c).free_symbols == set([a, b, c])
assert Circle((a,b),(c,d),(e,f)).free_symbols == set([e, d, c, b, f, a])
assert Polygon((a,b),(c,d),(e,f)).free_symbols == set([e, b, d, f, a, c])
assert RegularPolygon((a,b),c,d,e).free_symbols == set([e, a, b, c, d])
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/geometry/tests/test_geometry.py | Python | agpl-3.0 | 24,875 | 0.006151 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import warnings
from contextlib import contextmanager
import six
from cryptography import utils
from cryptography.exceptions import (
InternalError, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,
HashBackend, PBKDF2HMACBackend, PEMSerializationBackend,
PKCS8SerializationBackend, RSABackend,
TraditionalOpenSSLSerializationBackend, X509Backend
)
from cryptography.hazmat.backends.openssl.ciphers import (
_AESCTRCipherContext, _CipherContext
)
from cryptography.hazmat.backends.openssl.cmac import _CMACContext
from cryptography.hazmat.backends.openssl.dsa import (
_DSAParameters, _DSAPrivateKey, _DSAPublicKey
)
from cryptography.hazmat.backends.openssl.ec import (
_EllipticCurvePrivateKey, _EllipticCurvePublicKey
)
from cryptography.hazmat.backends.openssl.hashes import _HashContext
from cryptography.hazmat.backends.openssl.hmac import _HMACContext
from cryptography.hazmat.backends.openssl.rsa import (
_RSAPrivateKey, _RSAPublicKey
)
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from cryptography.hazmat.bindings.openssl.binding import Binding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.asymmetric.padding import (
MGF1, OAEP, PKCS1v15, PSS
)
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"])
_OpenSSLError = collections.namedtuple("_OpenSSLError",
["code", "lib", "func", "reason"])
@utils.register_interface(CipherBackend)
@utils.register_interface(CMACBackend)
@utils.register_interface(DSABackend)
@utils.register_interface(EllipticCurveBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
@utils.register_interface(PKCS8SerializationBackend)
@utils.register_interface(RSABackend)
@utils.register_interface(TraditionalOpenSSLSerializationBackend)
@utils.register_interface(PEMSerializationBackend)
@utils.register_interface(X509Backend)
class Backend(object):
"""
OpenSSL API binding interfaces.
"""
name = "openssl"
def __init__(self):
self._binding = Binding()
self._ffi = self._binding.ffi
self._lib = self._binding.lib
self._binding.init_static_locks()
# adds all ciphers/digests for EVP
self._lib.OpenSSL_add_all_algorithms()
# registers available SSL/TLS ciphers and digests
self._lib.SSL_library_init()
# loads error strings for libcrypto and libssl functions
self._lib.SSL_load_error_strings()
self._cipher_registry = {}
self._register_default_ciphers()
self.activate_osrandom_engine()
def activate_builtin_random(self):
# Obtain a new structural reference.
e = self._lib.ENGINE_get_default_RAND()
if e != self._ffi.NULL:
self._lib.ENGINE_unregister_RAND(e)
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
# decrement the structural reference from get_default_RAND
res = self._lib.ENGINE_finish(e)
assert res == 1
def activate_osrandom_engine(self):
# Unregister and free the current engine.
self.activate_builtin_random()
# Fetches an engine by id and returns it. This creates a structural
# reference.
e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id)
assert e != self._ffi.NULL
# Initialize the engine for use. This adds a functional reference.
res = self._lib.ENGINE_init(e)
assert res == 1
# Set the engine as the default RAND provider.
res = self._lib.ENGINE_set_default_RAND(e)
assert res == 1
# Decrement the structural ref incremented by ENGINE_by_id.
res = self._lib.ENGINE_free(e)
assert res == 1
# Decrement the functional ref incremented by ENGINE_init.
res = self._lib.ENGINE_finish(e)
assert res == 1
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
def openssl_version_text(self):
"""
Friendly string name of the loaded OpenSSL library. This is not
necessarily the same version as it was compiled against.
Example: OpenSSL 1.0.1e 11 Feb 2013
"""
return self._ffi.string(
self._lib.SSLeay_version(self._lib.SSLEAY_VERSION)
).decode("ascii")
def create_hmac_ctx(self, key, algorithm):
return _HMACContext(self, key, algorithm)
def hash_supported(self, algorithm):
digest = self._lib.EVP_get_digestbyname(algorithm.name.encode("ascii"))
return digest != self._ffi.NULL
def hmac_supported(self, algorithm):
return self.hash_supported(algorithm)
def create_hash_ctx(self, algorithm):
return _HashContext(self, algorithm)
def cipher_supported(self, cipher, mode):
if self._evp_cipher_supported(cipher, mode):
return True
elif isinstance(mode, CTR) and isinstance(cipher, AES):
return True
else:
return False
def _evp_cipher_supported(self, cipher, mode):
try:
adapter = self._cipher_registry[type(cipher), type(mode)]
except KeyError:
return False
evp_cipher = adapter(self, cipher, mode)
return self._ffi.NULL != evp_cipher
def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):
if (cipher_cls, mode_cls) in self._cipher_registry:
raise ValueError("Duplicate registration for: {0} {1}.".format(
cipher_cls, mode_cls)
)
self._cipher_registry[cipher_cls, mode_cls] = adapter
def _register_default_ciphers(self):
for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8]:
self.register_cipher_adapter(
AES,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CTR, ECB, OFB, CFB]:
self.register_cipher_adapter(
Camellia,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CFB, CFB8, OFB]:
self.register_cipher_adapter(
TripleDES,
mode_cls,
GetCipherByName("des-ede3-{mode.name}")
)
self.register_cipher_adapter(
TripleDES,
ECB,
GetCipherByName("des-ede3")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
Blowfish,
mode_cls,
GetCipherByName("bf-{mode.name}")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
SEED,
mode_cls,
GetCipherByName("seed-{mode.name}")
)
for cipher_cls, mode_cls in itertools.product(
[CAST5, IDEA],
[CBC, OFB, CFB, ECB],
):
self.register_cipher_adapter(
cipher_cls,
mode_cls,
GetCipherByName("{cipher.name}-{mode.name}")
)
self.register_cipher_adapter(
ARC4,
type(None),
GetCipherByName("rc4")
)
self.register_cipher_adapter(
AES,
GCM,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
def create_symmetric_encryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES)
and not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5
# extended life ends 2020).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)
def create_symmetric_decryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES)
and not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5
# extended life ends 2020).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
def pbkdf2_hmac_supported(self, algorithm):
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
return self.hmac_supported(algorithm)
else:
# OpenSSL < 1.0.0 has an explicit PBKDF2-HMAC-SHA1 function,
# so if the PBKDF2_HMAC function is missing we only support
# SHA1 via PBKDF2_HMAC_SHA1.
return isinstance(algorithm, hashes.SHA1)
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
buf = self._ffi.new("char[]", length)
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode("ascii"))
assert evp_md != self._ffi.NULL
res = self._lib.PKCS5_PBKDF2_HMAC(
key_material,
len(key_material),
salt,
len(salt),
iterations,
evp_md,
length,
buf
)
assert res == 1
else:
if not isinstance(algorithm, hashes.SHA1):
raise UnsupportedAlgorithm(
"This version of OpenSSL only supports PBKDF2HMAC with "
"SHA1.",
_Reasons.UNSUPPORTED_HASH
)
res = self._lib.PKCS5_PBKDF2_HMAC_SHA1(
key_material,
len(key_material),
salt,
len(salt),
iterations,
length,
buf
)
assert res == 1
return self._ffi.buffer(buf)[:]
def _err_string(self, code):
err_buf = self._ffi.new("char[]", 256)
self._lib.ERR_error_string_n(code, err_buf, 256)
return self._ffi.string(err_buf, 256)[:]
def _consume_errors(self):
errors = []
while True:
code = self._lib.ERR_get_error()
if code == 0:
break
lib = self._lib.ERR_GET_LIB(code)
func = self._lib.ERR_GET_FUNC(code)
reason = self._lib.ERR_GET_REASON(code)
errors.append(_OpenSSLError(code, lib, func, reason))
return errors
def _unknown_error(self, error):
return InternalError(
"Unknown error code {0} from OpenSSL, "
"you should probably file a bug. {1}.".format(
error.code, self._err_string(error.code)
)
)
def _bn_to_int(self, bn):
if six.PY3:
# Python 3 has constant time from_bytes, so use that.
bn_num_bytes = (self._lib.BN_num_bits(bn) + 7) // 8
bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes)
bin_len = self._lib.BN_bn2bin(bn, bin_ptr)
assert bin_len > 0
assert bin_ptr != self._ffi.NULL
return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big")
else:
# Under Python 2 the best we can do is hex()
hex_cdata = self._lib.BN_bn2hex(bn)
assert hex_cdata != self._ffi.NULL
hex_str = self._ffi.string(hex_cdata)
self._lib.OPENSSL_free(hex_cdata)
return int(hex_str, 16)
def _int_to_bn(self, num, bn=None):
"""
Converts a python integer to a BIGNUM. The returned BIGNUM will not
be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will
be discarded after use.
"""
if bn is None:
bn = self._ffi.NULL
if six.PY3:
# Python 3 has constant time to_bytes, so use that.
binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big")
bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)
assert bn_ptr != self._ffi.NULL
return bn_ptr
else:
# Under Python 2 the best we can do is hex()
hex_num = hex(num).rstrip("L").lstrip("0x").encode("ascii") or b"0"
bn_ptr = self._ffi.new("BIGNUM **")
bn_ptr[0] = bn
res = self._lib.BN_hex2bn(bn_ptr, hex_num)
assert res != 0
assert bn_ptr[0] != self._ffi.NULL
return bn_ptr[0]
def generate_rsa_private_key(self, public_exponent, key_size):
rsa._verify_rsa_parameters(public_exponent, key_size)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
bn = self._int_to_bn(public_exponent)
bn = self._ffi.gc(bn, self._lib.BN_free)
res = self._lib.RSA_generate_key_ex(
rsa_cdata, key_size, bn, self._ffi.NULL
)
assert res == 1
return _RSAPrivateKey(self, rsa_cdata)
def generate_rsa_parameters_supported(self, public_exponent, key_size):
return (public_exponent >= 3 and public_exponent & 1 != 0 and
key_size >= 512)
def load_rsa_private_numbers(self, numbers):
rsa._check_private_key_components(
numbers.p,
numbers.q,
numbers.d,
numbers.dmp1,
numbers.dmq1,
numbers.iqmp,
numbers.public_numbers.e,
numbers.public_numbers.n
)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.p = self._int_to_bn(numbers.p)
rsa_cdata.q = self._int_to_bn(numbers.q)
rsa_cdata.d = self._int_to_bn(numbers.d)
rsa_cdata.dmp1 = self._int_to_bn(numbers.dmp1)
rsa_cdata.dmq1 = self._int_to_bn(numbers.dmq1)
rsa_cdata.iqmp = self._int_to_bn(numbers.iqmp)
rsa_cdata.e = self._int_to_bn(numbers.public_numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.public_numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
return _RSAPrivateKey(self, rsa_cdata)
def load_rsa_public_numbers(self, numbers):
rsa._check_public_key_components(numbers.e, numbers.n)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.e = self._int_to_bn(numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
return _RSAPublicKey(self, rsa_cdata)
def _bytes_to_bio(self, data):
"""
Return a _MemoryBIO namedtuple of (BIO, char*).
The char* is the storage for the BIO and it must stay alive until the
BIO is finished with.
"""
data_char_p = self._ffi.new("char[]", data)
bio = self._lib.BIO_new_mem_buf(
data_char_p, len(data)
)
assert bio != self._ffi.NULL
return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)
def _create_mem_bio(self):
"""
Creates an empty memory BIO.
"""
bio_method = self._lib.BIO_s_mem()
assert bio_method != self._ffi.NULL
bio = self._lib.BIO_new(bio_method)
assert bio != self._ffi.NULL
bio = self._ffi.gc(bio, self._lib.BIO_free)
return bio
def _read_mem_bio(self, bio):
"""
Reads a memory BIO. This only works on memory BIOs.
"""
buf = self._ffi.new("char **")
buf_len = self._lib.BIO_get_mem_data(bio, buf)
assert buf_len > 0
assert buf[0] != self._ffi.NULL
bio_data = self._ffi.buffer(buf[0], buf_len)[:]
return bio_data
def _evp_pkey_to_private_key(self, evp_pkey):
"""
Return the appropriate type of PrivateKey given an evp_pkey cdata
pointer.
"""
type = evp_pkey.type
if type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPrivateKey(self, rsa_cdata)
elif type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPrivateKey(self, dsa_cdata)
elif (self._lib.Cryptography_HAS_EC == 1 and
type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePrivateKey(self, ec_cdata)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _evp_pkey_to_public_key(self, evp_pkey):
"""
Return the appropriate type of PublicKey given an evp_pkey cdata
pointer.
"""
type = evp_pkey.type
if type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPublicKey(self, rsa_cdata)
elif type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPublicKey(self, dsa_cdata)
elif (self._lib.Cryptography_HAS_EC == 1 and
type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePublicKey(self, ec_cdata)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _pem_password_cb(self, password):
"""
Generate a pem_password_cb function pointer that copies the password to
OpenSSL as required and returns the number of bytes copied.
typedef int pem_password_cb(char *buf, int size,
int rwflag, void *userdata);
Useful for decrypting PKCS8 files and so on.
Returns a tuple of (cdata function pointer, callback function).
"""
def pem_password_cb(buf, size, writing, userdata):
pem_password_cb.called += 1
if not password:
pem_password_cb.exception = TypeError(
"Password was not given but private key is encrypted."
)
return 0
elif len(password) < size:
pw_buf = self._ffi.buffer(buf, size)
pw_buf[:len(password)] = password
return len(password)
else:
pem_password_cb.exception = ValueError(
"Passwords longer than {0} bytes are not supported "
"by this backend.".format(size - 1)
)
return 0
pem_password_cb.called = 0
pem_password_cb.exception = None
return (
self._ffi.callback("int (char *, int, int, void *)",
pem_password_cb),
pem_password_cb
)
def _mgf1_hash_supported(self, algorithm):
if self._lib.Cryptography_HAS_MGF1_MD:
return self.hash_supported(algorithm)
else:
return isinstance(algorithm, hashes.SHA1)
def rsa_padding_supported(self, padding):
if isinstance(padding, PKCS1v15):
return True
elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1):
return self._mgf1_hash_supported(padding._mgf._algorithm)
elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1):
return isinstance(padding._mgf._algorithm, hashes.SHA1)
else:
return False
def generate_dsa_parameters(self, key_size):
if key_size not in (1024, 2048, 3072):
raise ValueError(
"Key size must be 1024 or 2048 or 3072 bits.")
if (self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f and
key_size > 1024):
raise ValueError(
"Key size must be 1024 because OpenSSL < 1.0.0 doesn't "
"support larger key sizes.")
ctx = self._lib.DSA_new()
assert ctx != self._ffi.NULL
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
res = self._lib.DSA_generate_parameters_ex(
ctx, key_size, self._ffi.NULL, 0,
self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
assert res == 1
return _DSAParameters(self, ctx)
def generate_dsa_private_key(self, parameters):
ctx = self._lib.DSA_new()
assert ctx != self._ffi.NULL
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
ctx.p = self._lib.BN_dup(parameters._dsa_cdata.p)
ctx.q = self._lib.BN_dup(parameters._dsa_cdata.q)
ctx.g = self._lib.BN_dup(parameters._dsa_cdata.g)
self._lib.DSA_generate_key(ctx)
return _DSAPrivateKey(self, ctx)
def generate_dsa_private_key_and_parameters(self, key_size):
parameters = self.generate_dsa_parameters(key_size)
return self.generate_dsa_private_key(parameters)
def load_dsa_private_numbers(self, numbers):
dsa._check_dsa_private_numbers(numbers)
parameter_numbers = numbers.public_numbers.parameter_numbers
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(parameter_numbers.p)
dsa_cdata.q = self._int_to_bn(parameter_numbers.q)
dsa_cdata.g = self._int_to_bn(parameter_numbers.g)
dsa_cdata.pub_key = self._int_to_bn(numbers.public_numbers.y)
dsa_cdata.priv_key = self._int_to_bn(numbers.x)
return _DSAPrivateKey(self, dsa_cdata)
def load_dsa_public_numbers(self, numbers):
dsa._check_dsa_parameters(numbers.parameter_numbers)
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(numbers.parameter_numbers.p)
dsa_cdata.q = self._int_to_bn(numbers.parameter_numbers.q)
dsa_cdata.g = self._int_to_bn(numbers.parameter_numbers.g)
dsa_cdata.pub_key = self._int_to_bn(numbers.y)
return _DSAPublicKey(self, dsa_cdata)
def load_dsa_parameter_numbers(self, numbers):
dsa._check_dsa_parameters(numbers)
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(numbers.p)
dsa_cdata.q = self._int_to_bn(numbers.q)
dsa_cdata.g = self._int_to_bn(numbers.g)
return _DSAParameters(self, dsa_cdata)
def dsa_hash_supported(self, algorithm):
if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:
return isinstance(algorithm, hashes.SHA1)
else:
return self.hash_supported(algorithm)
def dsa_parameters_supported(self, p, q, g):
if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:
return (utils.bit_length(p) <= 1024 and utils.bit_length(q) <= 160)
else:
return True
def cmac_algorithm_supported(self, algorithm):
return (
self._lib.Cryptography_HAS_CMAC == 1
and self.cipher_supported(algorithm, CBC(
b"\x00" * algorithm.block_size))
)
def create_cmac_ctx(self, algorithm):
return _CMACContext(self, algorithm)
def load_pem_private_key(self, data, password):
return self._load_key(
self._lib.PEM_read_bio_PrivateKey,
self._evp_pkey_to_private_key,
data,
password,
)
def load_pem_public_key(self, data):
return self._load_key(
self._lib.PEM_read_bio_PUBKEY,
self._evp_pkey_to_public_key,
data,
None,
)
def load_pem_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.PEM_read_bio_X509(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_der_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.d2i_X509_bio(mem_bio.bio, self._ffi.NULL)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_traditional_openssl_pem_private_key(self, data, password):
warnings.warn(
"load_traditional_openssl_pem_private_key is deprecated and will "
"be removed in a future version, use load_pem_private_key "
"instead.",
utils.DeprecatedIn06,
stacklevel=2
)
return self.load_pem_private_key(data, password)
def load_pkcs8_pem_private_key(self, data, password):
warnings.warn(
"load_pkcs8_pem_private_key is deprecated and will be removed in a"
" future version, use load_pem_private_key instead.",
utils.DeprecatedIn06,
stacklevel=2
)
return self.load_pem_private_key(data, password)
def _load_key(self, openssl_read_func, convert_func, data, password):
mem_bio = self._bytes_to_bio(data)
password_callback, password_func = self._pem_password_cb(password)
evp_pkey = openssl_read_func(
mem_bio.bio,
self._ffi.NULL,
password_callback,
self._ffi.NULL
)
if evp_pkey == self._ffi.NULL:
if password_func.exception is not None:
errors = self._consume_errors()
assert errors
raise password_func.exception
else:
self._handle_key_loading_error()
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
if password is not None and password_func.called == 0:
raise TypeError(
"Password was given but private key is not encrypted.")
assert (
(password is not None and password_func.called == 1) or
password is None
)
return convert_func(evp_pkey)
def _handle_key_loading_error(self):
errors = self._consume_errors()
if not errors:
raise ValueError("Could not unserialize key data.")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_DECRYPTFINAL_EX,
self._lib.EVP_R_BAD_DECRYPT
),
(
self._lib.ERR_LIB_PKCS12,
self._lib.PKCS12_F_PKCS12_PBE_CRYPT,
self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR,
)
):
raise ValueError("Bad decrypt. Incorrect password?")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_PEM,
self._lib.PEM_F_PEM_GET_EVP_CIPHER_INFO,
self._lib.PEM_R_UNSUPPORTED_ENCRYPTION
),
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PBE_CIPHERINIT,
self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM
)
):
raise UnsupportedAlgorithm(
"PEM data is encrypted with an unsupported cipher",
_Reasons.UNSUPPORTED_CIPHER
)
elif any(
error[1:] == (
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PKCS82PKEY,
self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM
)
for error in errors
):
raise UnsupportedAlgorithm(
"Unsupported public key algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
)
else:
assert errors[0][1] in (
self._lib.ERR_LIB_EVP,
self._lib.ERR_LIB_PEM,
self._lib.ERR_LIB_ASN1,
)
raise ValueError("Could not unserialize key data.")
def elliptic_curve_supported(self, curve):
if self._lib.Cryptography_HAS_EC != 1:
return False
try:
curve_nid = self._elliptic_curve_to_nid(curve)
except UnsupportedAlgorithm:
curve_nid = self._lib.NID_undef
ctx = self._lib.EC_GROUP_new_by_curve_name(curve_nid)
if ctx == self._ffi.NULL:
errors = self._consume_errors()
assert (
curve_nid == self._lib.NID_undef or
errors[0][1:] == (
self._lib.ERR_LIB_EC,
self._lib.EC_F_EC_GROUP_NEW_BY_CURVE_NAME,
self._lib.EC_R_UNKNOWN_GROUP
)
)
return False
else:
assert curve_nid != self._lib.NID_undef
self._lib.EC_GROUP_free(ctx)
return True
def elliptic_curve_signature_algorithm_supported(
self, signature_algorithm, curve
):
if self._lib.Cryptography_HAS_EC != 1:
return False
# We only support ECDSA right now.
if not isinstance(signature_algorithm, ec.ECDSA):
return False
# Before 0.9.8m OpenSSL can't cope with digests longer than the curve.
if (
self._lib.OPENSSL_VERSION_NUMBER < 0x009080df and
curve.key_size < signature_algorithm.algorithm.digest_size * 8
):
return False
return self.elliptic_curve_supported(curve)
def generate_elliptic_curve_private_key(self, curve):
"""
Generate a new private key on the named curve.
"""
if self.elliptic_curve_supported(curve):
curve_nid = self._elliptic_curve_to_nid(curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
res = self._lib.EC_KEY_generate_key(ec_cdata)
assert res == 1
res = self._lib.EC_KEY_check_key(ec_cdata)
assert res == 1
return _EllipticCurvePrivateKey(self, ec_cdata)
else:
raise UnsupportedAlgorithm(
"Backend object does not support {0}.".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
def elliptic_curve_private_key_from_numbers(self, numbers):
warnings.warn(
"elliptic_curve_private_key_from_numbers is deprecated and will "
"be removed in a future version.",
utils.DeprecatedIn06,
stacklevel=2
)
return self.load_elliptic_curve_private_numbers(numbers)
def load_elliptic_curve_private_numbers(self, numbers):
public = numbers.public_numbers
curve_nid = self._elliptic_curve_to_nid(public.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, public.x, public.y)
res = self._lib.EC_KEY_set_private_key(
ec_cdata, self._int_to_bn(numbers.private_value))
assert res == 1
return _EllipticCurvePrivateKey(self, ec_cdata)
def elliptic_curve_public_key_from_numbers(self, numbers):
warnings.warn(
"elliptic_curve_public_key_from_numbers is deprecated and will be "
"removed in a future version.",
utils.DeprecatedIn06,
stacklevel=2
)
return self.load_elliptic_curve_public_numbers(numbers)
def load_elliptic_curve_public_numbers(self, numbers):
curve_nid = self._elliptic_curve_to_nid(numbers.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, numbers.x, numbers.y)
return _EllipticCurvePublicKey(self, ec_cdata)
def _elliptic_curve_to_nid(self, curve):
"""
Get the NID for a curve name.
"""
curve_aliases = {
"secp192r1": "prime192v1",
"secp256r1": "prime256v1"
}
curve_name = curve_aliases.get(curve.name, curve.name)
curve_nid = self._lib.OBJ_sn2nid(curve_name.encode())
if curve_nid == self._lib.NID_undef:
raise UnsupportedAlgorithm(
"{0} is not a supported elliptic curve".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
return curve_nid
@contextmanager
def _tmp_bn_ctx(self):
bn_ctx = self._lib.BN_CTX_new()
assert bn_ctx != self._ffi.NULL
bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free)
self._lib.BN_CTX_start(bn_ctx)
try:
yield bn_ctx
finally:
self._lib.BN_CTX_end(bn_ctx)
def _ec_key_determine_group_get_set_funcs(self, ctx):
"""
Given an EC_KEY determine the group and what methods are required to
get/set point coordinates.
"""
assert ctx != self._ffi.NULL
nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field")
assert nid_two_field != self._lib.NID_undef
group = self._lib.EC_KEY_get0_group(ctx)
assert group != self._ffi.NULL
method = self._lib.EC_GROUP_method_of(group)
assert method != self._ffi.NULL
nid = self._lib.EC_METHOD_get_field_type(method)
assert nid != self._lib.NID_undef
if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M:
set_func = self._lib.EC_POINT_set_affine_coordinates_GF2m
get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m
else:
set_func = self._lib.EC_POINT_set_affine_coordinates_GFp
get_func = self._lib.EC_POINT_get_affine_coordinates_GFp
assert set_func and get_func
return set_func, get_func, group
def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y):
"""
This is a port of EC_KEY_set_public_key_affine_coordinates that was
added in 1.0.1.
Sets the public key point in the EC_KEY context to the affine x and y
values.
"""
if x < 0 or y < 0:
raise ValueError(
"Invalid EC key. Both x and y must be non-negative."
)
bn_x = self._int_to_bn(x)
bn_y = self._int_to_bn(y)
set_func, get_func, group = (
self._ec_key_determine_group_get_set_funcs(ctx)
)
point = self._lib.EC_POINT_new(group)
assert point != self._ffi.NULL
point = self._ffi.gc(point, self._lib.EC_POINT_free)
with self._tmp_bn_ctx() as bn_ctx:
check_x = self._lib.BN_CTX_get(bn_ctx)
check_y = self._lib.BN_CTX_get(bn_ctx)
res = set_func(group, point, bn_x, bn_y, bn_ctx)
assert res == 1
res = get_func(group, point, check_x, check_y, bn_ctx)
assert res == 1
assert self._lib.BN_cmp(bn_x, check_x) == 0
assert self._lib.BN_cmp(bn_y, check_y) == 0
res = self._lib.EC_KEY_set_public_key(ctx, point)
assert res == 1
res = self._lib.EC_KEY_check_key(ctx)
if res != 1:
self._consume_errors()
raise ValueError("Invalid EC key.")
return ctx
class GetCipherByName(object):
def __init__(self, fmt):
self._fmt = fmt
def __call__(self, backend, cipher, mode):
cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()
return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii"))
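# Worked example (illustrative): the "{cipher.name}-{cipher.key_size}-{mode.name}"
# adapter registered above for AES/CBC resolves, for a 256-bit AES key, to the
# OpenSSL cipher name "aes-256-cbc" before the EVP_get_cipherbyname lookup.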
backend = Backend()
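# --- Hedged usage sketch (not part of the original module) ---
# The module-level `backend` singleton above is what the high-level primitives
# consume; with the backend-argument API of this era of cryptography, e.g.:
#
#     from cryptography.hazmat.primitives.asymmetric import rsa
#     key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
#                                    backend=backend)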
| CoderBotOrg/coderbotsrv | server/lib/cryptography/hazmat/backends/openssl/backend.py | Python | gpl-3.0 | 38,174 | 0 |
# check_full_toc.py - Unit tests for SWIG-based libcueify full TOC APIs
#
# Copyright (c) 2011 Ian Jacobi <pipian@pipian.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# KLUDGE to allow tests to work.
import sys
sys.path.insert(0, '../../build/swig/python')
import cueify
import struct
import unittest
# Create a binary track descriptor from a full TOC.
def TRACK_DESCRIPTOR(session, adr, ctrl, track,
abs_min, abs_sec, abs_frm, min, sec, frm):
return [session, (((adr & 0xF) << 4) | (ctrl & 0xF)), 0, track,
abs_min, abs_sec, abs_frm, 0, min, sec, frm]
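# For example (illustrative), TRACK_DESCRIPTOR(1, 1, 4, 1, 0, 0, 0, 0, 2, 0)
# yields [1, 0x14, 0, 1, 0, 0, 0, 0, 0, 2, 0]: session 1, ADR/CTRL byte 0x14,
# track 1 starting at MSF 00:02:00.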
serialized_mock_full_toc = [(((13 + 2 * 3) * 11 + 2) >> 8),
(((13 + 2 * 3) * 11 + 2) & 0xFF), 1, 2]
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA0, 0, 0, 0, 1, cueify.SESSION_MODE_1, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA1, 0, 0, 0, 12, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA2, 0, 0, 0, 51, 44, 26))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 1, 0, 0, 0, 0, 2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 2, 0, 0, 0, 4, 47, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 3, 0, 0, 0, 7, 42, 57))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 4, 0, 0, 0, 13, 47, 28))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 5, 0, 0, 0, 18, 28, 50))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 6, 0, 0, 0, 21, 56, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 7, 0, 0, 0, 24, 56, 74))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 8, 0, 0, 0, 30, 10, 55))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 9, 0, 0, 0, 34, 17, 20))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 10, 0, 0, 0, 39, 18, 66))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 11, 0, 0, 0, 43, 16, 40))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 12, 0, 0, 0, 47, 27, 61))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA0, 0, 0, 0, 13, cueify.SESSION_MODE_2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA1, 0, 0, 0, 13, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA2, 0, 0, 0, 57, 35, 13))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 13, 1, 2, 3, 54, 16, 26))
class TestFullTOCFunctions(unittest.TestCase):
def test_serialization(self):
# Test both deserialization and serialization (since, unlike
# in the C code, the Python library does not support directly
        # specifying the mock TOC).
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
s = full_toc.serialize()
self.assertEqual(full_toc.errorCode, cueify.OK)
self.assertEqual(len(s), len(serialized_mock_full_toc))
self.assertEqual(
s,
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc))
def test_getters(self):
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
self.assertEqual(full_toc.firstSession, 1)
self.assertEqual(full_toc.lastSession, 2)
self.assertEqual(len(full_toc.tracks), 13)
self.assertEqual(full_toc.tracks[0].session, 1)
self.assertEqual(full_toc.tracks[12].session, 2)
self.assertEqual(full_toc.tracks[0].controlFlags, 4)
self.assertEqual(full_toc.tracks[12].controlFlags, 6)
self.assertEqual(full_toc.tracks[0].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[12].subQChannelFormat, 1)
self.assertEqual(len(full_toc.sessions), 2)
self.assertEqual(len(full_toc.sessions[0].pseudotracks), 3)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 6)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[0].pointAddress.min, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.sec, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.frm, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.min, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.sec, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.frm, 0)
self.assertEqual(full_toc.tracks[12].pointAddress.min, 1)
self.assertEqual(full_toc.tracks[12].pointAddress.sec, 2)
self.assertEqual(full_toc.tracks[12].pointAddress.frm, 3)
self.assertEqual(full_toc.tracks[0].address.min, 0)
self.assertEqual(full_toc.tracks[0].address.sec, 2)
self.assertEqual(full_toc.tracks[0].address.frm, 0)
self.assertEqual(full_toc.tracks[12].address.min, 54)
self.assertEqual(full_toc.tracks[12].address.sec, 16)
self.assertEqual(full_toc.tracks[12].address.frm, 26)
self.assertEqual(full_toc.sessions[0].firstTrack, 1)
self.assertEqual(full_toc.sessions[1].firstTrack, 13)
self.assertEqual(full_toc.sessions[0].lastTrack, 12)
self.assertEqual(full_toc.sessions[1].lastTrack, 13)
self.assertEqual(full_toc.firstTrack, 1)
self.assertEqual(full_toc.lastTrack, 13)
self.assertEqual(full_toc.sessions[0].type, cueify.SESSION_MODE_1)
self.assertEqual(full_toc.sessions[1].type, cueify.SESSION_MODE_2)
self.assertEqual(full_toc.sessions[1].leadoutAddress.min, 57)
self.assertEqual(full_toc.sessions[1].leadoutAddress.sec, 35)
self.assertEqual(full_toc.sessions[1].leadoutAddress.frm, 13)
self.assertEqual(full_toc.discLength.min, 57)
self.assertEqual(full_toc.discLength.sec, 35)
self.assertEqual(full_toc.discLength.frm, 13)
self.assertEqual(full_toc.tracks[11].length.min, 4)
self.assertEqual(full_toc.tracks[11].length.sec, 16)
self.assertEqual(full_toc.tracks[11].length.frm, 40)
self.assertEqual(full_toc.sessions[1].length.min, 3)
self.assertEqual(full_toc.sessions[1].length.sec, 18)
self.assertEqual(full_toc.sessions[1].length.frm, 62)
if __name__ == '__main__':
unittest.main()
|
pipian/libcueify
|
tests/swig/check_full_toc.py
|
Python
|
mit
| 8,463 | 0.002363 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0016_change_page_url_path_to_text_field"),
]
operations = [
migrations.AlterField(
model_name="grouppagepermission",
name="permission_type",
field=models.CharField(
choices=[
("add", "Add/edit pages you own"),
("edit", "Edit any page"),
("publish", "Publish any page"),
("lock", "Lock/unlock any page"),
],
max_length=20,
verbose_name="Permission type",
),
preserve_default=True,
),
]
|
rsalmaso/wagtail
|
wagtail/core/migrations/0017_change_edit_page_permission_description.py
|
Python
|
bsd-3-clause
| 771 | 0 |
from digitalio import DigitalInOut, Direction, Pull
import board
import time
import neopixel
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
pixelPin = board.D2
pixelNumber = 8
strip = neopixel.NeoPixel(pixelPin, pixelNumber, brightness=1, auto_write=False)
switch = DigitalInOut(board.D1)
switch.direction = Direction.INPUT
switch.pull = Pull.UP
def wheel(pos):
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if (pos < 85):
return (int(pos * 3), int(255 - (pos * 3)), 0)
elif (pos < 170):
pos -= 85
return (int(255 - pos * 3), 0, int(pos * 3))
else:
pos -= 170
return (0, int(pos * 3), int(255 - pos * 3))
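# Worked examples: wheel(0) -> (0, 255, 0), wheel(85) -> (255, 0, 0),
# wheel(170) -> (0, 0, 255); values in between blend smoothly around the colour wheel.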
def rainbow_cycle(wait):
for outer in range(255):
for inner in range(len(strip)):
index = int((inner * 256 / len(strip)) + outer)
strip[inner] = wheel(index & 255)
strip.write()
time.sleep(wait)
while True:
if switch.value:
led.value = False
strip.fill((0, 0, 0))
strip.write()
else:
led.value = True
# strip.fill((255, 0, 0))
rainbow_cycle(0.001)
# time.sleep(0.01)
|
chris-schmitz/circuit-python-acrylic-nightlight
|
code/code.py
|
Python
|
mit
| 1,177 | 0.002549 |
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
ENDLESS_GAME = config.GetBool('endless-ring-game', 0)
NUM_RING_GROUPS = 16
MAX_TOONXZ = 15.0
MAX_LAT = 5
MAX_FIELD_SPAN = 135
CollisionRadius = 1.5
CollideMask = ToontownGlobals.CatchGameBitmask
TARGET_RADIUS = MAX_TOONXZ / 3.0 * 0.9
targetColors = ((TTLocalizer.ColorRed, VBase4(1.0, 0.4, 0.2, 1.0)),
(TTLocalizer.ColorGreen, VBase4(0.0, 0.9, 0.2, 1.0)),
(TTLocalizer.ColorOrange, VBase4(1.0, 0.5, 0.25, 1.0)),
(TTLocalizer.ColorPurple, VBase4(1.0, 0.0, 1.0, 1.0)),
(TTLocalizer.ColorWhite, VBase4(1.0, 1.0, 1.0, 1.0)),
(TTLocalizer.ColorBlack, VBase4(0.0, 0.0, 0.0, 1.0)),
(TTLocalizer.ColorYellow, VBase4(1.0, 1.0, 0.2, 1.0)))
ENVIRON_LENGTH = 300
ENVIRON_WIDTH = 150.0
ringColorSelection = [(0, 1, 2),
3,
4,
5,
6]
colorRed = {}
colorRed['Red'] = 1.0
colorRed['Green'] = 0.0
colorRed['Blue'] = 0.0
colorRed['Alpha'] = 0.5
colorBlue = {}
colorBlue['Red'] = 0.0
colorBlue['Green'] = 0.0
colorBlue['Blue'] = 1.0
colorBlue['Alpha'] = 0.5
colorGreen = {}
colorGreen['Red'] = 0.0
colorGreen['Green'] = 1.0
colorGreen['Blue'] = 0.0
colorGreen['Alpha'] = 0.5
colorYellow = {}
colorYellow['Red'] = 1.0
colorYellow['Green'] = 1.0
colorYellow['Blue'] = 0.0
colorYellow['Alpha'] = 0.5
colorPurple = {}
colorPurple['Red'] = 0.75
colorPurple['Green'] = 0.0
colorPurple['Blue'] = 1.0
colorPurple['Alpha'] = 0.5
colorOrange = {}
colorOrange['Red'] = 1.0
colorOrange['Green'] = 0.6
colorOrange['Blue'] = 0.0
colorOrange['Alpha'] = 0.5
colorBlack = {}
colorBlack['Red'] = 0.0
colorBlack['Green'] = 0.0
colorBlack['Blue'] = 0.0
colorBlack['Alpha'] = 1.0
colorWhite = {}
colorWhite['Red'] = 1.0
colorWhite['Green'] = 1.0
colorWhite['Blue'] = 1.0
colorWhite['Alpha'] = 1.0
difficultyPatterns = {
    ToontownGlobals.ToontownCentral: [[8, 4, 2, 0], [10, 16, 21, 28], [31, 15, 7, 3.5],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 10, 2],
    ToontownGlobals.DonaldsDock: [[7, 4, 2, 0], [11, 17, 23, 32], [29, 13, 6.5, 3.2],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 9, 2],
    ToontownGlobals.DaisyGardens: [[6, 4, 2, 0], [11, 18, 25, 34], [29, 13, 6.5, 3.1],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.MinniesMelodyland: [[6, 4, 2, 0], [12, 19, 27, 37], [28, 12, 6, 3.0],
        [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.TheBrrrgh: [[5, 4, 2, 0], [12, 20, 29, 40], [25, 12, 5.5, 2.5],
        [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 7, 2],
    ToontownGlobals.DonaldsDreamland: [[4, 3, 1, 0], [12, 21, 31, 42], [20, 10, 4.5, 2.0],
        [colorBlue, colorYellow, colorPurple, colorOrange], [2, 2, 2, 1], 7, 2]}
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/minigame/TargetGameGlobals.py
|
Python
|
mit
| 7,031 | 0.001991 |
import lorun
import os
import codecs
import random
import subprocess
import config
import sys
RESULT_MAP = [
2, 10, 5, 4, 3, 6, 11, 7, 12
]
class Runner:
def __init__(self):
return
def compile(self, judger, srcPath, outPath):
cmd = config.langCompile[judger.lang] % {'root': sys.path[0], 'src': srcPath, 'target': outPath}
p = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.STDOUT)
retval = p.wait()
return (retval, p.stdout.read())
def judge(self, judger, srcPath, outPath, inFile, ansFile, memlimit, timelimit):
cmd = config.langRun[judger.lang] % {'src': srcPath, 'target': outPath}
fout_path = "".join([sys.path[0], "/", "%s/%d.out" % (config.dataPath["tempPath"], random.randint(0, 65536))])
if os.path.exists(fout_path):
os.remove(fout_path)
fin = open(inFile, 'rU')
fout = open(fout_path, 'w')
runcfg = {
'args': cmd.split(" "),
'fd_in': fin.fileno(),
'fd_out': fout.fileno(),
'timelimit': int(timelimit),
'memorylimit': int(memlimit)
}
rst = lorun.run(runcfg)
fin.close()
fout.close()
if rst['result'] == 0:
fans = open(ansFile, 'rU')
fout = open(fout_path, 'rU')
crst = lorun.check(fans.fileno(), fout.fileno())
fout.close()
fans.close()
return (RESULT_MAP[crst], int(rst['memoryused']), int(rst['timeused']))
return (RESULT_MAP[rst['result']], 0, 0)
|
SkyZH/CloudOJWatcher
|
ojrunnerlinux.py
|
Python
|
gpl-3.0
| 1,643 | 0.009738 |
__author__ = 'Dr. Masroor Ehsan'
__email__ = 'masroore@gmail.com'
__copyright__ = 'Copyright 2013, Dr. Masroor Ehsan'
__license__ = 'BSD'
__version__ = '0.1.1'
from datetime import datetime
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
__all__ = ['parse_single_article', 'parse_story_set', 'parse_article_set']
def _find_and_set(key, rootNode, dict_obj, cb=None):
node = rootNode.find(key)
if node is not None:
dict_obj[key] = cb(node.text) if cb is not None else node.text
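# e.g. _find_and_set('score', topicNode, topic, float) copies the text of an existing
# <score> child into topic['score'] via float(); a missing element leaves the dict untouched.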
def _parse_datetime(input):
return datetime.strptime(input, "%Y-%m-%d %H:%M:%S")
def _parse_category_set(rootNode, tagName='category'):
categories = []
categoriesNode = rootNode.find('categories_set')
for categoryNode in categoriesNode.findall(tagName):
category = {}
_find_and_set('name', categoryNode, category)
_find_and_set('dashed_name', categoryNode, category)
if len(category) > 0:
categories.append(category)
return categories
def parse_category_set(content):
rootNode = etree.fromstring(content)
return _parse_category_set(rootNode)
def parse_single_article(content):
rootNode = etree.fromstring(content)
return _parse_single_article(rootNode)
def _parse_single_topic(rootNode):
topic = {}
_find_and_set('name', rootNode, topic)
_find_and_set('topic_group', rootNode, topic)
_find_and_set('topic_subclassification', rootNode, topic)
_find_and_set('score', rootNode, topic, float)
_find_and_set('image_url', rootNode, topic)
_find_and_set('link', rootNode, topic)
_find_and_set('guid', rootNode, topic)
_find_and_set('topic_classification', rootNode, topic)
_find_and_set('description', rootNode, topic)
return topic if len(topic) > 0 else None
def _parse_topic_set(rootNode):
topicSetNode = rootNode.find('topic_set')
topic_set = []
if topicSetNode is not None:
for node in topicSetNode.findall('topic'):
topic = _parse_single_topic(node)
if topic is not None:
topic_set.append(topic)
return topic_set if len(topic_set) > 0 else None
def _parse_thumbnail(rootNode, dict_obj):
thumbNode = rootNode.find('thumbnail')
if thumbNode is not None:
thumb = {}
_find_and_set('original_image', thumbNode, thumb)
_find_and_set('link', thumbNode, thumb)
if len(thumb) > 0:
dict_obj['thumbnail'] = thumb
def _parse_single_article(rootNode):
article = {}
_find_and_set('description', rootNode, article)
_find_and_set('title', rootNode, article)
_find_and_set('created_at', rootNode, article, _parse_datetime)
_find_and_set('published_at', rootNode, article, _parse_datetime)
_find_and_set('score', rootNode, article, float)
_find_and_set('link', rootNode, article)
_find_and_set('guid', rootNode, article)
catNode = rootNode.find('category')
article['category'] = {
'name': catNode.find('name').text,
'dashed_name': catNode.find('dashed_name').text}
authorSetNode = rootNode.find('author_set')
if authorSetNode is not None:
article['author_set'] = []
for authorNode in authorSetNode.findall('author'):
author = {
'guid': authorNode.find('guid').text,
'first_name': authorNode.find('first_name').text,
'last_name': authorNode.find('last_name').text,
}
article['author_set'].append(author)
topic_set = _parse_topic_set(rootNode)
if topic_set:
article['topic_set'] = topic_set
srcNode = rootNode.find('source')
source_dict = {}
_find_and_set('website', srcNode, source_dict)
_find_and_set('name', srcNode, source_dict)
_find_and_set('circulation', srcNode, source_dict, int)
_find_and_set('country', srcNode, source_dict)
_find_and_set('company_type', srcNode, source_dict)
_find_and_set('founded', srcNode, source_dict)
_find_and_set('staff_authors', srcNode, source_dict, int)
_find_and_set('frequency', srcNode, source_dict)
_find_and_set('owner', srcNode, source_dict)
_find_and_set('guid', srcNode, source_dict)
_find_and_set('is_blog', srcNode, source_dict, bool)
_find_and_set('thumbnail', srcNode, source_dict)
_find_and_set('description', srcNode, source_dict)
mediaNode = srcNode.find('media_type')
media_dict = {}
_find_and_set('name', mediaNode, media_dict)
_find_and_set('dashed_name', mediaNode, media_dict)
if len(media_dict) > 0:
source_dict['media_type'] = media_dict
if len(source_dict) > 0:
article['source'] = source_dict
return article
def _parse_author_set(rootNode):
authorSetNode = rootNode.find('author_set')
authors = []
if authorSetNode is not None:
for node in authorSetNode.findall('author'):
author = {}
_find_and_set('guid', node, author)
_find_and_set('name', node, author)
if len(author) > 0:
authors.append(author)
return authors if len(authors) > 0 else None
def _parse_story_set_article(rootNode):
article = {}
_find_and_set('description', rootNode, article)
_find_and_set('title', rootNode, article)
_find_and_set('published_at', rootNode, article, _parse_datetime)
_find_and_set('link', rootNode, article)
_find_and_set('guid', rootNode, article)
categories = _parse_category_set(rootNode, tagName='categories')
if categories is not None:
article['categories_set'] = categories
sourceNode = rootNode.find('source')
if sourceNode is not None:
source_dict = {}
_find_and_set('name', sourceNode, source_dict)
_find_and_set('guid', sourceNode, source_dict)
if len(source_dict) > 0:
article['source'] = source_dict
author_set = _parse_author_set(rootNode)
if author_set is not None:
article['author_set'] = author_set
return article
def _parse_story_node(rootNode):
story = {}
_find_and_set('num_articles', rootNode, story, int)
_find_and_set('guid', rootNode, story)
articles = []
for articleNode in rootNode.find('article_set').findall('article'):
article = _parse_story_set_article(articleNode)
if article is not None:
articles.append(article)
if len(articles) > 0:
story['article_set'] = articles
return story
def parse_story_set(content):
rootNode = etree.fromstring(content)
story_set = []
for storyNode in rootNode.findall('story'):
story_set.append(_parse_story_node(storyNode))
return story_set
def parse_article_set(content):
rootNode = etree.fromstring(content)
#<article_set num_found="197218">
article_set = []
for storyNode in rootNode.findall('article'):
article_set.append(_parse_single_article(storyNode))
return article_set
|
masroore/pynewscred
|
pynewscred/req_parser.py
|
Python
|
bsd-3-clause
| 7,407 | 0.00027 |
import tak
from . import tps
import attr
import re
@attr.s
class PTN(object):
tags = attr.ib()
moves = attr.ib()
@classmethod
def parse(cls, text):
head, tail = text.split("\n\n", 1)
tags_ = re.findall(r'^\[(\w+) "([^"]+)"\]$', head, re.M)
tags = dict(tags_)
tail = re.sub(r'{[^}]+}', ' ', tail)
moves = []
tokens = re.split(r'\s+', tail)
for t in tokens:
if t == '--':
continue
if re.search(r'\A(0|R|F|1|1/2)-(0|R|F|1|1/2)\Z', t):
continue
if re.match(r'\A\d+\.\Z', t):
continue
if t == '':
continue
t = re.sub(r"['!?]+$", '', t)
m = parse_move(t)
moves.append(m)
return cls(tags = tags, moves = moves)
def initial_position(self):
if 'TPS' in self.tags:
return tps.parse_tps(self.tags['TPS'])
return tak.Position.from_config(
tak.Config(size = int(self.tags['Size'])))
slide_map = {
'-': tak.MoveType.SLIDE_DOWN,
'+': tak.MoveType.SLIDE_UP,
'<': tak.MoveType.SLIDE_LEFT,
'>': tak.MoveType.SLIDE_RIGHT,
}
slide_rmap = dict((v, k) for (k, v) in slide_map.items())
place_map = {
'': tak.MoveType.PLACE_FLAT,
'S': tak.MoveType.PLACE_STANDING,
'C': tak.MoveType.PLACE_CAPSTONE,
'F': tak.MoveType.PLACE_FLAT,
}
place_rmap = {
tak.MoveType.PLACE_FLAT: '',
tak.MoveType.PLACE_STANDING: 'S',
tak.MoveType.PLACE_CAPSTONE: 'C',
}
def parse_move(move):
m = re.search(r'\A([CFS]?)([1-8]?)([a-h])([1-8])([<>+-]?)([1-8]*)[CFS]?\Z', move)
if not m:
raise BadMove(move, "malformed move")
stone, pickup, file, rank, dir, drops = m.groups()
x = ord(file) - ord('a')
y = ord(rank) - ord('1')
if pickup and not dir:
raise BadMove(move, "pick up but no direction")
typ = None
if dir:
typ = slide_map[dir]
else:
typ = place_map[stone]
slides = None
if drops:
slides = tuple(ord(c) - ord('0') for c in drops)
if (drops or pickup) and not dir:
raise BadMove(move, "pickup/drop without a direction")
if dir and not pickup and not slides:
pickup = '1'
if pickup and not slides:
slides = (int(pickup),)
if pickup and int(pickup) != sum(slides):
raise BadMove(move, "inconsistent pickup and drop: {0} v {1}".format(pickup, drops))
return tak.Move(x, y, typ, slides)
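# Worked examples: parse_move("a1") places a flat stone at a1, parse_move("Ca1") a
# capstone at a1, and parse_move("3c3>12") picks up three stones at c3 and slides
# right, dropping one then two -> Move(2, 2, SLIDE_RIGHT, (1, 2)).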
def format_move(move):
bits = []
bits.append(place_rmap.get(move.type, ''))
if move.type.is_slide():
pickup = sum(move.slides)
if pickup != 1:
bits.append(pickup)
bits.append(chr(move.x + ord('a')))
bits.append(chr(move.y + ord('1')))
if move.type.is_slide():
bits.append(slide_rmap[move.type])
if len(move.slides) > 1:
bits += [chr(d + ord('0')) for d in move.slides]
return ''.join(map(str, bits))
class BadMove(Exception):
def __init__(self, move, error):
self.move = move
self.error = error
super().__init__("{0}: {1}".format(error, move))
|
nelhage/taktician
|
python/tak/ptn/ptn.py
|
Python
|
mit
| 2,895 | 0.018307 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# do this when > 1.6!!!
# from django.db import migrations, models
from gazetteer.models import GazSource,GazSourceConfig,LocationTypeField,CodeFieldConfig,NameFieldConfig
from skosxl.models import Concept, Scheme, MapRelation
from gazetteer.settings import TARGET_NAMESPACE_FT
def load_base_ft():
(sch,created) = Scheme.objects.get_or_create(uri=TARGET_NAMESPACE_FT[:-1], defaults = { 'pref_label' :"Gaz Feature types" })
try:
(ft,created) = Concept.objects.get_or_create(term="ADMIN", defaults = { 'pref_label' :"Populated Place", 'definition':"Populated place"} , scheme = sch)
except:
pass
# now set up cross references from NGA feature types namespace
# now set up harvest config
def load_ft_mappings() :
pass
def load_config() :
try:
GazSourceConfig.objects.filter(name="TM_WorldBoundaries").delete()
except:
pass
config=GazSourceConfig.objects.create(lat_field="lat", name="TM_WorldBoundaries", long_field="lon")
NameFieldConfig.objects.create(config=config,language="en", as_default=True, languageNamespace="", field="name", languageField="")
LocationTypeField.objects.create(field='"ADMIN"',namespace=TARGET_NAMESPACE_FT, config=config)
CodeFieldConfig.objects.create(config=config,field="iso3",namespace="http://mapstory.org/id/countries/iso3")
CodeFieldConfig.objects.create(config=config,field="iso2",namespace="http://mapstory.org/id/countries/iso2")
CodeFieldConfig.objects.create(config=config,field="un",namespace="http://mapstory.org/id/countries/un")
CodeFieldConfig.objects.create(config=config,field="fips",namespace="http://mapstory.org/id/countries/fips")
(s,created) = GazSource.objects.get_or_create(source="tm_world_borders", config=config, source_type="mapstory")
print (s,created)
"""
class Migration(migrations.Migration):
initial = True
dependencies = [
#('yourappname', '0001_initial'),
]
operations = [
migrations.RunPython(load_ft_mappings),
migrations.RunPython(load_config),
]
"""
|
rob-metalinkage/django-gazetteer
|
gazetteer/fixtures/mapstory_tm_world_config.py
|
Python
|
cc0-1.0
| 2,139 | 0.02618 |
a = {"abc": "d<caret>ef"}
|
asedunov/intellij-community
|
python/testData/breadcrumbs/dictKey.py
|
Python
|
apache-2.0
| 25 | 0.04 |
# Make your image, region, and location changes then change the from-import
# to match.
from configurables_akeeton_desktop import *
import hashlib
import java.awt.Toolkit
import json
import os
import shutil
import time
Settings.ActionLogs = True
Settings.InfoLogs = True
Settings.DebugLogs = True
Settings.LogTime = True
Settings.AutoWaitTimeout = AUTO_WAIT_TIMEOUT_SECONDS
TEMP_DIR_PREFIX = time.strftime("MTGO-scry-bug_%Y-%m-%d_%H-%M-%S", time.gmtime())
TEMP_PATH = tempfile.mkdtemp(prefix=TEMP_DIR_PREFIX)
attempts = 0
def main():
global attempts
attempts += 1
ATTEMPT_NUM_PATH = get_attempt_number_path(attempts)
HITS_PATH = os.path.join(ATTEMPT_NUM_PATH, HITS_DIR)
MISSES_PATH = os.path.join(ATTEMPT_NUM_PATH, MISSES_DIR)
print "TEMP_PATH:", TEMP_PATH
print "ATTEMPT_NUM_PATH", ATTEMPT_NUM_PATH
print "HITS_PATH:", HITS_PATH
print "MISSES_PATH:", MISSES_PATH
os.mkdir(ATTEMPT_NUM_PATH)
os.mkdir(HITS_PATH)
os.mkdir(MISSES_PATH)
iterations = 0
hits = 0
card_hash_to_times_card_sent_to_bottom = ['card_hash_to_times_card_sent_to_bottom', ZeroValueDict()]
card_hash_to_times_card_sent_to_bottom_and_drawn = ['card_hash_to_times_card_sent_to_bottom_and_drawn', ZeroValueDict()]
card_hash_to_times_card_drawn = ['card_hash_to_times_card_drawn', ZeroValueDict()]
card_hash_to_capture = ['card_hash_to_capture', {}]
while True:
REGION_PLAY.wait("play.png")
time.sleep(0.5)
REGION_PLAY.click(LOCATION_PLAY)
time.sleep(1.0)
REGION_MULLIGAN_KEEP.wait("mulligan_keep.png")
for i in range(0, 7):
REGION_MULLIGAN_KEEP.wait("mulligan_highlighted_keep.png")
time.sleep(2.0) # I swear if I have to keep incrementing this value...
REGION_MULLIGAN_KEEP.click(LOCATION_MULLIGAN)
time.sleep(1.0)
REGION_TEMPORARY_ZONE.wait("temporary_zone.png")
time.sleep(0.5)
click(LOCATION_TEMPORARY_ZONE_CARD)
time.sleep(0.5)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY.click(LOCATION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY)
time.sleep(0.1)
REGION_CHAT_PUT_A_CARD_ON_THE_BOTTOM_OF_THE_LIBRARY.wait("chat_put_a_card_on_the_bottom_of_the_library.png")
time.sleep(0.1)
card_sent_to_bottom_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
hover(LOCATION_FIRST_CARD_IN_HAND) # Update the preview with the drawn card.
time.sleep(0.5)
card_drawn_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
copy_path = ""
card_sent_to_bottom_hash = hash_file(card_sent_to_bottom_capture)
card_drawn_hash = hash_file(card_drawn_capture)
card_hash_to_times_card_sent_to_bottom[1][card_sent_to_bottom_hash] += 1
card_hash_to_times_card_drawn[1][card_drawn_hash] += 1
if card_sent_to_bottom_hash == card_drawn_hash:
hits += 1
card_hash_to_times_card_sent_to_bottom_and_drawn[1][card_sent_to_bottom_hash] += 1
copy_path = HITS_PATH
else:
copy_path = MISSES_PATH
iterations += 1
print "{0}/{1}".format(hits, iterations)
card_sent_to_bottom_capture_dest_path = os.path.join(copy_path, str(iterations) + "_bottom.png")
card_drawn_capture_dest_path = os.path.join(copy_path, str(iterations) + "_drawn.png")
shutil.move(card_sent_to_bottom_capture, card_sent_to_bottom_capture_dest_path)
shutil.move(card_drawn_capture, card_drawn_capture_dest_path)
card_hash_to_capture[1][card_sent_to_bottom_hash] = card_sent_to_bottom_capture_dest_path
card_hash_to_capture[1][card_drawn_hash] = card_drawn_capture_dest_path
with open(os.path.join(ATTEMPT_NUM_PATH, 'stats.json'), 'w') as stats_file:
json.dump(card_hash_to_times_card_sent_to_bottom_and_drawn, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_times_card_sent_to_bottom, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_times_card_drawn, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_capture, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
stats_file.write('{0}/{1}'.format(hits, iterations))
click(LOCATION_X_CLOSE)
REGION_CONCEDE_MATCH_BUTTON.wait("concede_match.png")
time.sleep(0.1)
type('\n')
class ZeroValueDict(dict):
def __missing__(self, key):
return 0
def hash_file(file_path):
hasher = hashlib.md5()
with open(file_path, 'rb') as opened_file:
buf = opened_file.read()
hasher.update(buf)
return hasher.hexdigest()
def get_attempt_number_path(attempts):
return os.path.join(TEMP_PATH, 'attempt_{0}'.format(attempts))
if __name__ == '__main__':
while True:
try:
main()
except FindFailed as e:
for i in range(0, TIMES_TO_BEEP_ON_FIND_FAIlED):
java.awt.Toolkit.getDefaultToolkit().beep()
time.sleep(1.0)
print e
with open(os.path.join(get_attempt_number_path(attempts), 'error.log'), 'w') as errorlog:
errorlog.write(str(e))
raise e # Replace this with a way to reset MTGO to a starting state so we can try again.
|
akeeton/MTGO-scry-bug-test
|
MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py
|
Python
|
mit
| 5,599 | 0.008573 |
import pyaudio
import struct
from threading import Thread, Condition
import time
from logging import thread
import socket
CHUNK = 2**12
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
class AudioReader(Thread):
def __init__(self, raw = False, remote = False, host = 'localhost', port = 9999):
Thread.__init__(self)
self.active = False
self.listeners = []
self.condition = Condition()
self.quit = False
self.raw = raw
self.remote = remote
self.host = host
self.port = port
def pause(self):
self.active = False
def play(self):
self.active = True
self.condition.acquire()
self.condition.notify()
self.condition.release()
def stop(self):
if not self.active:
self.play()
self.active = False
self.quit = True
def readData(self):
self.condition.acquire()
self.condition.wait()
self.condition.release()
self.stream = pyaudio.PyAudio().open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while self.active:
data = self.stream.read(CHUNK)
if not self.raw:
count = len(data) / 2
fmt = "%dh" % (count)
shorts = struct.unpack(fmt, data)
else:
shorts = data
for l in self.listeners:
l(shorts)
self.stream.close()
def readRemoteData(self):
self.condition.acquire()
self.condition.wait()
self.condition.release()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
buf = []
while self.active:
data = self.socket.recv((CHUNK*2-len(buf))*2)
if not self.raw:
count = len(data) / 2
fmt = "%dh" % (count)
shorts = struct.unpack(fmt, data)
buf.extend(shorts)
if len(buf)>=CHUNK*2:
for l in self.listeners:
l(buf)
buf=[]
else:
for l in self.listeners:
l(data)
self.socket.close()
def run(self):
while not self.quit:
if not self.remote:
self.readData()
else:
self.readRemoteData()
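# Minimal usage sketch (illustrative; `handle_block` is a hypothetical callback):
#   reader = AudioReader()
#   reader.listeners.append(handle_block)  # called with each CHUNK of samples
#   reader.start()                         # worker thread waits on the condition
#   reader.play()                          # wakes the worker and starts capture
#   reader.stop()                          # ends the loop so run() can return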
|
nanoscopy/afm-calibrator
|
nanoscopy/audio.py
|
Python
|
mit
| 2,692 | 0.015602 |
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
from collections.abc import Mapping
class EqualityComparableID:
__slots__ = ()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.id == other.id
return NotImplemented
class HashableID(EqualityComparableID):
__slots__ = ()
def __hash__(self):
return self.id
class DataMapping(Mapping):
__slots__ = ()
def __contains__(self, item):
return item in self.data
def __getattr__(self, name):
try:
return self.data[name]
except KeyError:
raise AttributeError from None
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError from None
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
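# Illustrative subclass (not part of this module): a model that stores the raw API
# payload in `data` gains both attribute access (obj.id) and mapping access (obj["id"]).
#   class Place(DataMapping):
#       __slots__ = ("data",)
#       def __init__(self, data):
#           self.data = data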
|
tweepy/tweepy
|
tweepy/mixins.py
|
Python
|
mit
| 949 | 0.001054 |
import sys, os
def timeSplit( ETR ):
h = int(ETR/3600)
m = int(ETR - 3600*h)/60
s = int(ETR - 3600*h - 60*m)
return h, m, s
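# Worked example (assuming Python 2 integer division, which this module relies on):
# timeSplit(3661) -> (1, 1, 1), i.e. 3661 seconds is 1 hour, 1 minute, 1 second.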
def printProgress( current, total, deltaIter, deltaTime ):
terminalString = "\rProgress: "
if total==0: total+=1
percent = 100.*current/total
nDots = int(percent/5)
dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
percentString = "{0:.0f}%".format(percent)
ETR = deltaTime*(total - current)/float(deltaIter)
hours = int(ETR/3600)
minutes = int(ETR - 3600*hours)/60
seconds = int(ETR - 3600*hours - 60*minutes)
ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
if deltaTime < 0.0001: ETRstring = " ETR= "
terminalString += dotsString + percentString + ETRstring
    sys.stdout.write(terminalString)
sys.stdout.flush()
def printProgressTime( current, total, deltaTime ):
terminalString = "\rProgress: "
if total==0: total+=1
percent = 100.*current/total
nDots = int(percent/5)
dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
percentString = "{0:.0f}%".format(percent)
if current != 0:
ETR = (deltaTime*(total - current))/float(current)
#print ETR
hours = int(ETR/3600)
minutes = int(ETR - 3600*hours)/60
seconds = int(ETR - 3600*hours - 60*minutes)
ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
else: ETRstring = " ETR= "
if deltaTime < 0.0001: ETRstring = " ETR= "
terminalString += dotsString + percentString + ETRstring
    sys.stdout.write(terminalString)
sys.stdout.flush()
def ensureDirectory( dirName ):
if not os.path.exists(dirName):
os.makedirs(dirName)
|
bvillasen/phyGPU
|
tools/tools.py
|
Python
|
gpl-3.0
| 1,660 | 0.037349 |
#
# This file is part of opsd.
#
# opsd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opsd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with opsd. If not, see <http://www.gnu.org/licenses/>.
"""Interface to allow the dome controller to operate an Astrohaven dome via domed"""
from warwick.observatory.dome import (
CommandStatus as DomeCommandStatus,
DomeShutterStatus,
DomeHeartbeatStatus)
from warwick.observatory.operations.constants import DomeStatus
from warwick.observatory.common import daemons, validation
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': ['module'],
'required': [
'daemon', 'movement_timeout', 'heartbeat_timeout'
],
'properties': {
'daemon': {
'type': 'string',
'daemon_name': True
},
'movement_timeout': {
'type': 'number',
'minimum': 0
},
'heartbeat_timeout': {
'type': 'number',
'minimum': 0
}
}
}
def validate_config(config_json):
return validation.validation_errors(config_json, CONFIG_SCHEMA, {
'daemon_name': validation.daemon_name_validator,
})
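# Example of a configuration block accepted by the schema above (the daemon name is
# illustrative only):
#   {"daemon": "onemetre_dome_daemon", "movement_timeout": 120, "heartbeat_timeout": 119}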
class DomeInterface:
"""Interface to allow the dome controller to operate an Astrohaven dome via domed"""
def __init__(self, dome_config_json):
self._daemon = getattr(daemons, dome_config_json['daemon'])
# Communications timeout when opening or closing the dome (takes up to ~80 seconds for the onemetre dome)
self._movement_timeout = dome_config_json['movement_timeout']
# Timeout period (seconds) for the dome controller
# The dome heartbeat is pinged once per LOOP_DELAY when the dome is under
# automatic control and is fully open or fully closed. This timeout should
# be large enough to account for the time it takes to open and close the dome
self._heartbeat_timeout = dome_config_json['heartbeat_timeout']
def query_status(self):
with self._daemon.connect() as dome:
status = dome.status()
if status['heartbeat_status'] in [DomeHeartbeatStatus.TrippedClosing,
DomeHeartbeatStatus.TrippedIdle]:
return DomeStatus.Timeout
if status['shutter_a'] == DomeShutterStatus.Closed and \
status['shutter_b'] == DomeShutterStatus.Closed:
return DomeStatus.Closed
if status['shutter_a'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing] or \
status['shutter_b'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing]:
return DomeStatus.Moving
return DomeStatus.Open
def ping_heartbeat(self):
print('dome: sending heartbeat ping')
with self._daemon.connect() as dome:
ret = dome.set_heartbeat_timer(self._heartbeat_timeout)
return ret == DomeCommandStatus.Succeeded
def disable_heartbeat(self):
print('dome: disabling heartbeat')
with self._daemon.connect() as dome:
            # Passing 0 (rather than re-arming the normal timeout) is what disabling the
            # heartbeat is taken to mean here.
            ret = dome.set_heartbeat_timer(0)
return ret == DomeCommandStatus.Succeeded
def close(self):
print('dome: sending heartbeat ping before closing')
with self._daemon.connect() as dome:
dome.set_heartbeat_timer(self._heartbeat_timeout)
print('dome: closing')
with self._daemon.connect(timeout=self._movement_timeout) as dome:
ret = dome.close_shutters('ba')
return ret == DomeCommandStatus.Succeeded
def open(self):
print('dome: sending heartbeat ping before opening')
with self._daemon.connect() as dome:
dome.set_heartbeat_timer(self._heartbeat_timeout)
print('dome: opening')
with self._daemon.connect(timeout=self._movement_timeout) as dome:
ret = dome.open_shutters('ab')
return ret == DomeCommandStatus.Succeeded
|
warwick-one-metre/opsd
|
warwick/observatory/operations/dome/astrohaven/__init__.py
|
Python
|
gpl-3.0
| 4,416 | 0.001812 |
"""Twitter crawler script"""
import tweepy
from database import MongoDB
class Twitter(object): # pylint: disable=too-few-public-methods
"""Class Twitter"""
def __init__(self):
self.consumer_key = "40GvlhlFPNbVGkZnPncPH8DgB"
self.consumer_secret = "G595ceskX8iVH34rsuLSqpFROL0brp8ezzZR2dGvTKvcpPsKPw"
self.access_token = "397905190-LXMFC0clhtDxx5cITBWVFqVUKNQBKuqM06Ls4k5n"
self.access_token_secret = "nPzoHy5UwzOPUZVZO3JhBFRL3WgdM0jJKignxIzQ6nAS1"
self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth.set_access_token(self.access_token, self.access_token_secret)
self.api = tweepy.API(self.auth)
# Method to print our tweets
def print_tweets(self, count=1):
tweets = self._user_timeline(count)
for tweet in tweets:
print tweet.encode('utf-8')
# Method to save our tweets
def save_tweets(self, count=1):
database = MongoDB("verificacion")
coll = database.collection("tweets")
tweets = self._user_timeline(count)
for tweet in tweets:
coll.insert({"tweet": tweet})
    # Returns the *count* most recent tweets from the authenticated user's timeline
def _user_timeline(self, count=200):
tweets = []
public_tweets = self.api.user_timeline(id=self.auth.get_username(), count=count)
for tweet in public_tweets:
text = tweet.text
tweets.append(text)
return tweets
if __name__ == '__main__':
twepp = Twitter()
twepp.print_tweets(10)
twepp.save_tweets(10)
|
rslnautic/practica-verificacion
|
src/tweet_crawler.py
|
Python
|
apache-2.0
| 1,614 | 0.004337 |
# -*- coding: utf-8 -*-
"""
Kay preparse management command.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from os import listdir, path, mkdir
from werkzeug.utils import import_string
import kay
import kay.app
from kay.utils import local
from kay.utils.jinja2utils.compiler import compile_dir
from kay.management.utils import print_status
IGNORE_FILENAMES = {
'kay': ('debug', 'app_template'),
'app': ('kay'),
}
def find_template_dir(target_path, ignore_filenames):
ret = []
for filename in listdir(target_path):
target_fullpath = path.join(target_path, filename)
if path.isdir(target_fullpath):
if filename.startswith(".") or filename in ignore_filenames:
continue
if filename == "templates":
ret.append(target_fullpath)
else:
ret = ret + find_template_dir(target_fullpath, ignore_filenames)
else:
continue
return ret
def do_preparse_bundle():
"""
Pre compile all the jinja2 templates in Kay itself.
"""
print_status("Compiling bundled templates...")
app = kay.app.get_application()
env = app.app.jinja2_env
for dir in find_template_dir(kay.KAY_DIR, ('debug','app_template')):
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
print_status("Finished compiling bundled templates...")
def do_preparse_apps():
"""
Pre compile all the jinja2 templates in your applications.
"""
from kay.conf import LazySettings
print_status("Compiling templates...")
application = kay.app.get_application()
applications = [application]
settings_treated = []
for key, settings_name in \
application.app.app_settings.PER_DOMAIN_SETTINGS.iteritems():
if not settings_name in settings_treated:
applications.append(kay.app.get_application(
settings=LazySettings(settings_module=settings_name)))
settings_treated.append(settings_name)
for app in applications:
compile_app_templates(app.app) # pass KayApp instance
for key, submount_app in app.mounts.iteritems():
if isinstance(submount_app, kay.app.KayApp):
compile_app_templates(submount_app)
print_status("Finished compiling templates...")
def prepare_destdir(dir):
def replace_dirname(orig):
if 'templates' in orig:
return orig.replace('templates', 'templates_compiled')
else:
return orig+'_compiled'
dest = replace_dirname(dir)
if path.isdir(dest):
for d, subdirs, files in os.walk(dest):
for f in files:
compiled_filename = "%s/%s" % (d, f)
orig_filename = compiled_filename.replace(dest, dir)
if not path.isfile(orig_filename):
os.unlink(compiled_filename)
print_status("%s does not exist. So, '%s' is removed." % (
orig_filename, compiled_filename))
else:
mkdir(dest)
return dest
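# e.g. prepare_destdir('myapp/templates') returns 'myapp/templates_compiled', creating it
# if needed and deleting compiled files whose original template no longer exists.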
def compile_app_templates(app):
env = app.jinja2_env
target_dirs = [dir for dir in app.app_settings.TEMPLATE_DIRS\
if os.path.isdir(dir)]
for app in app.app_settings.INSTALLED_APPS:
if app.startswith("kay."):
continue
mod = import_string(app)
target_dirs.extend(find_template_dir(os.path.dirname(mod.__file__),
('kay')))
for dir in target_dirs:
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
|
IanLewis/kay
|
kay/management/preparse.py
|
Python
|
bsd-3-clause
| 3,612 | 0.013566 |
from linked_list import LinkedList
class Stack(object):
def __init__(self, iterable=None):
self._list = LinkedList(iterable)
def push(self, val):
self._list.insert(val)
def pop(self):
return self._list.pop()
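# Usage sketch (LIFO behaviour assumes LinkedList.insert prepends and pop removes the head):
#   s = Stack([1, 2])
#   s.push(3)
#   s.pop()  # -> 3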
|
HeyIamJames/Data_Structures
|
stack.py
|
Python
|
gpl-2.0
| 248 | 0.008065 |
#!/usr/bin/env python
# Dependencies.py - discover, read, and write dependencies file for make.
# The format like the output from "g++ -MM" which produces a
# list of header (.h) files used by source files (.cxx).
# As a module, provides
# FindPathToHeader(header, includePath) -> path
# FindHeadersInFile(filePath) -> [headers]
# FindHeadersInFileRecursive(filePath, includePath, renames) -> [paths]
# FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames) -> [dependencies]
# ExtractDependencies(input) -> [dependencies]
# TextFromDependencies(dependencies)
# WriteDependencies(output, dependencies)
# UpdateDependencies(filepath, dependencies)
# PathStem(p) -> stem
# InsertSynonym(dependencies, current, additional) -> [dependencies]
# If run as a script reads from stdin and writes to stdout.
# Only tested with ASCII file names.
# Copyright 2019 by Neil Hodgson <neilh@scintilla.org>
# The License.txt file describes the conditions under which this software may be distributed.
# Requires Python 2.7 or later
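# Example (sketch) of regenerating a make-style dependencies file; the globs, include
# path and output name are illustrative:
#   deps = FindDependencies(["src/*.cxx"], ["include", "src"], ".o", "")
#   UpdateDependencies("deps.mak", deps, "# Generated by Dependencies.py")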
import codecs, glob, os, sys
if __name__ == "__main__":
import FileGenerator
else:
from . import FileGenerator
continuationLineEnd = " \\"
def FindPathToHeader(header, includePath):
for incDir in includePath:
relPath = os.path.join(incDir, header)
if os.path.exists(relPath):
return relPath
return ""
fhifCache = {} # Remember the includes in each file. ~5x speed up.
def FindHeadersInFile(filePath):
if filePath not in fhifCache:
headers = []
with codecs.open(filePath, "r", "utf-8") as f:
for line in f:
if line.strip().startswith("#include"):
parts = line.split()
if len(parts) > 1:
header = parts[1]
if header[0] != '<': # No system headers
headers.append(header.strip('"'))
fhifCache[filePath] = headers
return fhifCache[filePath]
def FindHeadersInFileRecursive(filePath, includePath, renames):
headerPaths = []
for header in FindHeadersInFile(filePath):
if header in renames:
header = renames[header]
relPath = FindPathToHeader(header, includePath)
if relPath and relPath not in headerPaths:
headerPaths.append(relPath)
subHeaders = FindHeadersInFileRecursive(relPath, includePath, renames)
headerPaths.extend(sh for sh in subHeaders if sh not in headerPaths)
return headerPaths
def RemoveStart(relPath, start):
if relPath.startswith(start):
return relPath[len(start):]
return relPath
def ciKey(f):
return f.lower()
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames={}):
deps = []
for sourceGlob in sourceGlobs:
sourceFiles = glob.glob(sourceGlob)
# Sorting the files minimizes deltas as order returned by OS may be arbitrary
sourceFiles.sort(key=ciKey)
for sourceName in sourceFiles:
objName = os.path.splitext(os.path.basename(sourceName))[0]+objExt
headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
depsForSource = [sourceName] + headerPaths
depsToAppend = [RemoveStart(fn.replace("\\", "/"), startDirectory) for
fn in depsForSource]
deps.append([objName, depsToAppend])
return deps
def PathStem(p):
""" Return the stem of a filename: "CallTip.o" -> "CallTip" """
return os.path.splitext(os.path.basename(p))[0]
def InsertSynonym(dependencies, current, additional):
""" Insert a copy of one object file with dependencies under a different name.
Used when one source file is used to create two object files with different
preprocessor definitions. """
result = []
for dep in dependencies:
result.append(dep)
if (dep[0] == current):
depAdd = [additional, dep[1]]
result.append(depAdd)
return result
def ExtractDependencies(input):
""" Create a list of dependencies from input list of lines
Each element contains the name of the object and a list of
files that it depends on.
Dependencies that contain "/usr/" are removed as they are system headers. """
deps = []
for line in input:
headersLine = line.startswith(" ") or line.startswith("\t")
line = line.strip()
isContinued = line.endswith("\\")
line = line.rstrip("\\ ")
fileNames = line.strip().split(" ")
if not headersLine:
# its a source file line, there may be headers too
sourceLine = fileNames[0].rstrip(":")
fileNames = fileNames[1:]
deps.append([sourceLine, []])
deps[-1][1].extend(header for header in fileNames if "/usr/" not in header)
return deps
def TextFromDependencies(dependencies):
""" Convert a list of dependencies to text. """
text = ""
indentHeaders = "\t"
joinHeaders = continuationLineEnd + os.linesep + indentHeaders
for dep in dependencies:
object, headers = dep
text += object + ":"
for header in headers:
text += joinHeaders
text += header
if headers:
text += os.linesep
return text
def UpdateDependencies(filepath, dependencies, comment=""):
""" Write a dependencies file if different from dependencies. """
FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep +
TextFromDependencies(dependencies))
def WriteDependencies(output, dependencies):
""" Write a list of dependencies out to a stream. """
output.write(TextFromDependencies(dependencies))
if __name__ == "__main__":
""" Act as a filter that reformats input dependencies to one per line. """
inputLines = sys.stdin.readlines()
deps = ExtractDependencies(inputLines)
WriteDependencies(sys.stdout, deps)
|
apmckinlay/csuneido
|
vs2019scintilla/scripts/Dependencies.py
|
Python
|
gpl-2.0
| 5,533 | 0.023857 |
"""
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
""" Fields for `ImageModule` and `ImageDescriptor`. """
data = String(help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
<json>
navigatorSizeRatio: 0.25,
wrapHorizontal: false,
showNavigator: true,
navigatorPosition: "BOTTOM_LEFT",
showNavigationControl: true,
tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
</json>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_('Image Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='professor:green,teachingAssistant:blue',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class ImageAnnotationModule(AnnotatableFields, XModule):
'''Image Annotation Module'''
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'imageannotation'
def __init__(self, *args, **kwargs):
super(ImageAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'display_name': self.display_name_with_default,
'instructions_html': self.instructions,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'tag': self.instructor_tags,
'openseadragonjson': self.openseadragonjson,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('imageannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
# get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor): # pylint: disable=abstract-method
''' Image annotation descriptor '''
module_class = ImageAnnotationModule
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ImageAnnotationDescriptor.annotation_storage_url,
ImageAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
|
c0710204/edx-platform
|
common/lib/xmodule/xmodule/imageannotation_module.py
|
Python
|
agpl-3.0
| 7,154 | 0.002935 |
#task_H
def dijkstra(start, graph):
n = len(graph)
D = [None] * n
D[start] = 0
index = 0
Q = [start]
while index < len(Q):
v = Q[index]
index += 1
for u in graph[v]:
if D[u] == None or D[v] + min(graph[v][u]) < D[u]:
D[u] = D[v] + min(graph[v][u])
Q.append(u)
return D
def reverse(graph):
    n = len(graph)
    graph_reversed = {x: {} for x in range(n)}
    for i in range(n):
        for v in graph[i]:
            for w in graph[i][v]:
                add(graph_reversed, v, i, w)
    return graph_reversed
def add(graph, a, b, w):
if b in graph[a]:
        graph[a][b].append(w)
else:
graph[a][b] = [w]
def min_vertex(x, D, graph):
    # Among the neighbours of x in the reversed graph, pick the vertex u minimising D[u] + w(u, x).
    A = {u: D[u] + min(graph[x][u]) for u in graph[x] if D[u] is not None}
    L = list(A.items())
min_i = L[0][0]
min_v = L[0][1]
for v in A:
if A[v] < min_v:
min_v = A[v]
min_i = v
return min_i
def path(graph, D, s, f):
graph = reverse(graph)
x = f
P = [f]
while x != s:
x = min_vertex(x, D, graph)
P.append(x)
return P[-1::-1]
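# Input: "n m s f" on the first line, then m lines "a b w" giving undirected weighted
# edges between 0-indexed vertices; the program prints a minimum-weight path from s to f.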
n, m, s, f = tuple(map(int, input().split()))
graph = {x: {} for x in range(n)}
for i in range(m):
a, b, w = tuple(map(int, input().split()))
add(graph, a, b, w)
add(graph, b, a, w)
D = dijkstra(s, graph)
print(*path(graph, D, s, f))
|
Senbjorn/mipt_lab_2016
|
lab_19/task_H.py
|
Python
|
gpl-3.0
| 1,281 | 0.042155 |
from . import test_attachment
fast_suite = [test_attachment,
]
|
meta-it/misc-addons
|
attachment_large_object/tests/__init__.py
|
Python
|
lgpl-3.0
| 78 | 0 |
from .forms import SetupForm
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from splunkdj.decorators.render import render_to
from splunkdj.setup import create_setup_view_context
@login_required
def home(request):
# Redirect to the default view, which happens to be a non-framework view
return redirect('/en-us/app/twitter2/twitter_general')
@render_to('twitter2:setup.html')
@login_required
def setup(request):
result = create_setup_view_context(
request,
SetupForm,
reverse('twitter2:home'))
# HACK: Workaround DVPL-4647 (Splunk 6.1 and below):
# Refresh current app's state so that non-framework views
# observe when the app becomes configured.
service = request.service
app_name = service.namespace['app']
service.apps[app_name].post('_reload')
return result
|
dakiri/splunk-app-twitter
|
twitter2/django/twitter2/views.py
|
Python
|
apache-2.0
| 944 | 0.004237 |
"""Decorators for labeling test objects
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
nose.tools.make_decorator(original_function)(decorator) in returning
the decorator, in order to preserve metadata such as function name,
setup and teardown functions and so on - see nose.tools for more
information.
"""
import warnings
import sys
def slow(t):
"""Labels a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
    than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant)."""
t.slow = True
return t
def setastest(tf=True):
''' Signals to nose that this function is or is not a test
Parameters
----------
tf : bool
        If True, specifies that this is a test; if False, it is not a test.
    e.g.
>>> from numpy.testing.decorators import setastest
>>> @setastest(False)
... def func_with_test_in_name(arg1, arg2): pass
...
>>>
This decorator cannot use the nose namespace, because it can be
called from a non-test module. See also istest and nottest in
nose.tools
'''
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable.
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = lambda : skip_condition()
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = '\n'+msg
return "Skipping test: %s%s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
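# A minimal usage sketch for ``skipif`` (illustrative only; the test module,
# condition and message below are assumptions, not part of this file):
#
#     >>> import sys
#     >>> from numpy.testing.decorators import skipif
#     >>> @skipif(sys.platform == 'win32', "POSIX-only behaviour")
#     ... def test_posix_only():
#     ...     pass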
def knownfailureif(fail_condition, msg=None):
''' Make function raise KnownFailureTest exception if fail_condition is true
Parameters
----------
fail_condition : bool or callable.
Flag to determine whether to mark test as known failure (True)
or not (False). If the condition is a callable, it is used at
runtime to dynamically make the decision. This is useful for
tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a KnownFailureTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if callable(fail_condition):
fail_val = lambda : fail_condition()
else:
fail_val = lambda : fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
                raise KnownFailureTest(msg)
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
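# A minimal usage sketch for ``knownfailureif`` (illustrative only; the test
# below is an assumed example of a known-broken case):
#
#     >>> from numpy.testing.decorators import knownfailureif
#     >>> @knownfailureif(True, "known regression, tracked upstream")
#     ... def test_known_broken():
#     ...     assert 1 == 2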
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class WarningManager:
def __init__(self, record=False, module=None):
self._record = record
if module is None:
self._module = sys.modules['warnings']
else:
self._module = module
self._entered = False
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
def deprecated(conditional=True):
"""This decorator can be used to filter Deprecation Warning, to avoid
printing them during the test suite run, while checking that the test
actually raises a DeprecationWarning.
Parameters
----------
conditional : bool or callable.
Flag to determine whether to mark test as deprecated or not. If the
condition is a callable, it is used at runtime to dynamically make the
decision.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
.. versionadded:: 1.4.0
"""
def deprecate_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def _deprecated_imp(*args, **kwargs):
# Poor man's replacement for the with statement
ctx = WarningManager(record=True)
l = ctx.__enter__()
warnings.simplefilter('always')
try:
f(*args, **kwargs)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% f.__name__)
                if l[0].category is not DeprecationWarning:
                    raise AssertionError("First warning for %s is not a "
                                         "DeprecationWarning (is %s)" % (f.__name__, l[0]))
finally:
ctx.__exit__()
if callable(conditional):
cond = conditional()
else:
cond = conditional
if cond:
return nose.tools.make_decorator(f)(_deprecated_imp)
else:
return f
return deprecate_decorator
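# A minimal usage sketch for ``deprecated`` (illustrative only; ``old_api_call``
# is a hypothetical function expected to emit a DeprecationWarning when called):
#
#     >>> from numpy.testing.decorators import deprecated
#     >>> @deprecated()
#     ... def test_old_api_warns():
#     ...     old_api_call()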
|
illume/numpy3k
|
numpy/testing/decorators.py
|
Python
|
bsd-3-clause
| 9,735 | 0.003287 |
"""Test the Advantage Air Sensor Platform."""
from datetime import timedelta
from json import loads
from homeassistant.components.advantage_air.const import DOMAIN as ADVANTAGE_AIR_DOMAIN
from homeassistant.components.advantage_air.sensor import (
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
ADVANTAGE_AIR_SET_COUNTDOWN_VALUE,
)
from homeassistant.config_entries import RELOAD_AFTER_UPDATE_DELAY
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt
from tests.common import async_fire_time_changed
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_sensor_platform(hass, aioclient_mock):
"""Test sensor platform."""
aioclient_mock.get(
TEST_SYSTEM_URL,
text=TEST_SYSTEM_DATA,
)
aioclient_mock.get(
TEST_SET_URL,
text=TEST_SET_RESPONSE,
)
await add_mock_config(hass)
registry = er.async_get(hass)
assert len(aioclient_mock.mock_calls) == 1
# Test First TimeToOn Sensor
entity_id = "sensor.ac_one_time_to_on"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 0
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-timetoOn"
value = 20
await hass.services.async_call(
ADVANTAGE_AIR_DOMAIN,
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
{ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 3
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["countDownToOn"] == value
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
# Test First TimeToOff Sensor
entity_id = "sensor.ac_one_time_to_off"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 10
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-timetoOff"
value = 0
await hass.services.async_call(
ADVANTAGE_AIR_DOMAIN,
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
{ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 5
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["countDownToOff"] == value
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
# Test First Zone Vent Sensor
entity_id = "sensor.zone_open_with_sensor_vent"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 100
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-vent"
# Test Second Zone Vent Sensor
entity_id = "sensor.zone_closed_with_sensor_vent"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 0
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z02-vent"
# Test First Zone Signal Sensor
entity_id = "sensor.zone_open_with_sensor_signal"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 40
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-signal"
# Test Second Zone Signal Sensor
entity_id = "sensor.zone_closed_with_sensor_signal"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 10
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z02-signal"
# Test First Zone Temp Sensor (disabled by default)
entity_id = "sensor.zone_open_with_sensor_temperature"
assert not hass.states.get(entity_id)
registry.async_update_entity(entity_id=entity_id, disabled_by=None)
await hass.async_block_till_done()
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 25
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-temp"
|
aronsky/home-assistant
|
tests/components/advantage_air/test_sensor.py
|
Python
|
apache-2.0
| 4,781 | 0.000627 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import survey_update_wizard
|
MostlyOpen/odoo_addons
|
myo_survey/wizard/__init__.py
|
Python
|
agpl-3.0
| 936 | 0 |
import subprocess
from typing import List, Optional
from approvaltests import ensure_file_exists
from approvaltests.command import Command
from approvaltests.core.reporter import Reporter
from approvaltests.utils import to_json
PROGRAM_FILES = "{ProgramFiles}"
class GenericDiffReporterConfig:
def __init__(self, name: str, path: str, extra_args: Optional[List[str]] = None):
self.name = name
self.path = path
self.extra_args = extra_args or []
def serialize(self):
result = [self.name, self.path]
if self.extra_args:
result.append(self.extra_args)
return result
def create_config(config) -> GenericDiffReporterConfig:
return GenericDiffReporterConfig(*config)
class GenericDiffReporter(Reporter):
"""
A reporter that launches
an external diff tool given by config.
"""
@staticmethod
def create(diff_tool_path: str) -> "GenericDiffReporter":
return GenericDiffReporter(create_config(["custom", diff_tool_path]))
def __init__(self, config: GenericDiffReporterConfig) -> None:
self.name = config.name
self.path = self.expand_program_files(config.path)
self.extra_args = config.extra_args
def __str__(self) -> str:
if self.extra_args:
config = {
"name": self.name,
"path": self.path,
"arguments": self.extra_args,
}
else:
config = {"name": self.name, "path": self.path}
return to_json(config)
@staticmethod
def run_command(command_array):
subprocess.Popen(command_array)
def get_command(self, received: str, approved: str) -> List[str]:
return [self.path] + self.extra_args + [received, approved]
def report(self, received_path: str, approved_path: str) -> bool:
if not self.is_working():
return False
ensure_file_exists(approved_path)
command_array = self.get_command(received_path, approved_path)
self.run_command(command_array)
return True
def is_working(self) -> bool:
found = Command(self.path).locate()
if not found:
return False
else:
self.path = found
return True
@staticmethod
def expand_program_files(path: str) -> str:
if PROGRAM_FILES not in path:
return path
for candidate in [
r"C:/Program Files",
r"C:/Program Files (x86)",
r"C:/ProgramW6432",
]:
possible = path.replace(PROGRAM_FILES, candidate)
if Command.executable(possible):
return possible
return path.replace(PROGRAM_FILES, "C:/Program Files")
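# A minimal usage sketch (illustrative only; the diff tool path is an assumption
# and must point at an executable present on the local machine):
#
#     >>> reporter = GenericDiffReporter.create("/usr/bin/meld")
#     >>> reporter.report("received.txt", "approved.txt")  # launches the tool if found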
|
approvals/ApprovalTests.Python
|
approvaltests/reporters/generic_diff_reporter.py
|
Python
|
apache-2.0
| 2,752 | 0.00109 |
# commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Iterable,
Stats,
finalize_process
)
from git.diff import Diffable
from .tree import Tree
from . import base
from .util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from git.compat import text_type
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = 'i18n.commitencoding'
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type = "commit"
__slots__ = ("tree",
"author", "authored_date", "author_tz_offset",
"committer", "committed_date", "committer_tz_offset",
"message", "parents", "encoding", "gpgsig")
_id_attribute_ = "hexsha"
def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
committer=None, committed_date=None, committer_tz_offset=None,
message=None, parents=None, encoding=None, gpgsig=None):
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
:param parents: tuple( Commit, ... )
is a tuple of commit ids or actual Commits
:param tree: Tree
Tree object
:param author: Actor
is the author string ( will be implicitly converted into an Actor object )
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer string
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
:param committer_tz_offset: int_seconds_west_of_utc
is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:param parents:
List or tuple of Commit objects which are our parent(s) in the commit
dependency graph
:return: git.Commit
:note:
Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit, self).__init__(repo, binsha)
if tree is not None:
assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
if tree is not None:
self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
if gpgsig is not None:
self.gpgsig = gpgsig
@classmethod
def _get_intermediate_items(cls, commit):
return commit.parents
def _set_cache_(self, attr):
if attr in Commit.__slots__:
            # read the data in a chunk, it's faster - then provide a file wrapper
binsha, typename, self.size, stream = self.repo.odb.stream(self.binsha) # @UnusedVariable
self._deserialize(BytesIO(stream.read()))
else:
super(Commit, self)._set_cache_(attr)
# END handle attrs
@property
def authored_datetime(self):
return from_timestamp(self.authored_date, self.author_tz_offset)
@property
def committed_datetime(self):
return from_timestamp(self.committed_date, self.committer_tz_offset)
@property
def summary(self):
""":return: First line of the commit message"""
return self.message.split('\n', 1)[0]
def count(self, paths='', **kwargs):
"""Count the number of commits reachable from this commit
:param paths:
is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
else:
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
@property
def name_rev(self):
"""
:return:
String describing the commits hex sha based on the closest Reference.
Mostly useful for UI purposes"""
return self.repo.git.name_rev(self)
@classmethod
def iter_items(cls, repo, rev, paths='', **kwargs):
"""Find all commits matching the given criteria.
:param repo: is the Repo
:param rev: revision specifier, see git-rev-parse for viable options
:param paths:
is an optional path or list of paths, if set only Commits that include the path
or paths will be considered
:param kwargs:
optional keyword arguments to git rev-list where
``max_count`` is the maximum number of commits to fetch
``skip`` is the number of commits to skip
``since`` all commits since i.e. '1970-01-01'
:return: iterator yielding Commit items"""
if 'pretty' in kwargs:
raise ValueError("--pretty cannot be used as parsing expects single sha's only")
# END handle pretty
# use -- in any case, to prevent possibility of ambiguous arguments
# see https://github.com/gitpython-developers/GitPython/issues/264
args = ['--']
if paths:
args.extend((paths, ))
# END if paths
proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
return cls._iter_from_process_or_stream(repo, proc)
def iter_parents(self, paths='', **kwargs):
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
skip = kwargs.get("skip", 1)
if skip == 0: # skip ourselves
skip = 1
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs)
@property
def stats(self):
"""Create a git stat from changes between this commit and its first parent
or from all changes done if this is the very first commit.
:return: git.Stats"""
if not self.parents:
text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
text2 = ""
for line in text.splitlines()[1:]:
(insertions, deletions, filename) = line.split("\t")
text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
text = text2
else:
text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
return Stats._list_from_string(self.repo, text)
@classmethod
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
"""Parse out commit information into a list of Commit objects
We expect one-line per commit, and parse the actual commit information directly
        from our lightning fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
stream = proc_or_stream
if not hasattr(stream, 'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, _ = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
# END for each line in stream
# TODO: Review this - it seems process handling got a bit out of control
# due to many developers trying to fix the open file handles issue
if hasattr(proc_or_stream, 'wait'):
finalize_process(proc_or_stream)
@classmethod
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
author_date=None, commit_date=None):
"""Commit the given tree, creating a commit object.
:param repo: Repo object the commit should be part of
:param tree: Tree object or hex or bin sha
the tree of the new commit
:param message: Commit message. It may be an empty string if no message is provided.
It will be converted to a string in any case.
:param parent_commits:
Optional Commit objects to use as parents for the new commit.
If empty list, the commit will have no parents at all and become
a root commit.
If None , the current head commit will be the parent of the
new commit object
:param head:
If True, the HEAD will be advanced to the new commit automatically.
Else the HEAD will remain pointing on the previous commit. This could
lead to undesired results when diffing files.
:param author: The name of the author, optional. If unset, the repository
configuration is used to obtain this value.
:param committer: The name of the committer, optional. If unset, the
repository configuration is used to obtain this value.
:param author_date: The timestamp for the author field
:param commit_date: The timestamp for the committer field
:return: Commit object representing the new commit
:note:
Additional information about the committer and Author are taken from the
environment or from the git configuration, see git-commit-tree for
more information"""
if parent_commits is None:
try:
parent_commits = [repo.head.commit]
except ValueError:
# empty repositories have no head commit
parent_commits = list()
# END handle parent commits
else:
for p in parent_commits:
if not isinstance(p, cls):
raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
# end check parent commit types
# END if parent commits are unset
# retrieve all additional information, create a commit object, and
# serialize it
# Generally:
# * Environment variables override configuration values
# * Sensible defaults are set according to the git documentation
        # COMMITTER AND AUTHOR INFO
cr = repo.config_reader()
env = os.environ
committer = committer or Actor.committer(cr)
author = author or Actor.author(cr)
# PARSE THE DATES
unix_time = int(time())
is_dst = daylight and localtime().tm_isdst > 0
offset = altzone if is_dst else timezone
author_date_str = env.get(cls.env_author_date, '')
if author_date:
author_time, author_offset = parse_date(author_date)
elif author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
# END set author time
committer_date_str = env.get(cls.env_committer_date, '')
if commit_date:
committer_time, committer_offset = parse_date(commit_date)
elif committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
# END set committer time
# assume utf8 encoding
enc_section, enc_option = cls.conf_encoding.split('.')
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
# if the tree is no object, make sure we create one - otherwise
# the created commit object is invalid
if isinstance(tree, str):
tree = repo.tree(tree)
# END tree conversion
# CREATE NEW COMMIT
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
stream = BytesIO()
new_commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
new_commit.binsha = istream.binsha
if head:
# need late import here, importing git at the very beginning throws
# as well ...
import git.refs
try:
repo.head.set_commit(new_commit, logmsg=message)
except ValueError:
# head is not yet set to the ref our HEAD points to
# Happens on first commit
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
# END handle empty repositories
# END advance head handling
return new_commit
#{ Serializable Implementation
def _serialize(self, stream):
write = stream.write
write(("tree %s\n" % self.tree).encode('ascii'))
for p in self.parents:
write(("parent %s\n" % p).encode('ascii'))
a = self.author
aname = a.name
c = self.committer
fmt = "%s %s <%s> %s %s\n"
write((fmt % ("author", aname, a.email,
self.authored_date,
altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))
# encode committer
aname = c.name
write((fmt % ("committer", aname, c.email,
self.committed_date,
altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))
if self.encoding != self.default_encoding:
write(("encoding %s\n" % self.encoding).encode('ascii'))
try:
if self.__getattribute__('gpgsig') is not None:
write(b"gpgsig")
for sigline in self.gpgsig.rstrip("\n").split("\n"):
write((" " + sigline + "\n").encode('ascii'))
except AttributeError:
pass
write(b"\n")
# write plain bytes, be sure its encoded according to our encoding
if isinstance(self.message, text_type):
write(self.message.encode(self.encoding))
else:
write(self.message)
# END handle encoding
return self
def _deserialize(self, stream):
""":param from_rev_list: if true, the stream format is coming from the rev-list command
Otherwise it is assumed to be a plain data stream from our object"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')
self.parents = list()
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith(b'parent'):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii'))))
# END for each parent line
self.parents = tuple(self.parents)
# we don't know actual author encoding before we have parsed it, so keep the lines around
author_line = next_line
committer_line = readline()
# we might run into one or more mergetag blocks, skip those for now
next_line = readline()
while next_line.startswith(b'mergetag '):
next_line = readline()
while next_line.startswith(b' '):
next_line = readline()
# end skip mergetags
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
self.gpgsig = None
# read headers
enc = next_line
buf = enc.strip()
while buf:
            if buf[0:9] == b"encoding ":
                self.encoding = buf[buf.find(b' ') + 1:].decode('ascii')
elif buf[0:7] == b"gpgsig ":
sig = buf[buf.find(b' ') + 1:] + b"\n"
is_next_header = False
while True:
sigbuf = readline()
if not sigbuf:
break
if sigbuf[0:1] != b" ":
buf = sigbuf.strip()
is_next_header = True
break
sig += sigbuf[1:]
# end read all signature
self.gpgsig = sig.rstrip(b"\n").decode('ascii')
if is_next_header:
continue
buf = readline().strip()
# decode the authors name
try:
self.author, self.authored_date, self.author_tz_offset = \
parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
exc_info=True)
try:
self.committer, self.committed_date, self.committer_tz_offset = \
parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
exc_info=True)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding, 'replace')
except UnicodeDecodeError:
log.error("Failed to decode message '%s' using encoding %s", self.message, self.encoding, exc_info=True)
# END exception handling
return self
#} END serializable implementation
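# A minimal usage sketch (illustrative only; assumes GitPython's top-level Repo
# class and an existing repository at ``repo_dir``):
#
#     >>> from git import Repo
#     >>> repo = Repo(repo_dir)
#     >>> for commit in Commit.iter_items(repo, 'HEAD', max_count=5):
#     ...     print(commit.hexsha, commit.summary)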
|
jeblair/GitPython
|
git/objects/commit.py
|
Python
|
bsd-3-clause
| 20,799 | 0.002164 |
# -*- coding: utf-8 -*-
# Chinese text cannot be aligned with Python's built-in padding functions;
# spaces have to be added/removed manually based on the number of Chinese characters
# Python 2.7.12 & matplotlib 2.0.0
import re
from urllib2 import *
import matplotlib.pyplot as plt
#Get a set of records from nba.hupu.com for the given team
def getDataSet(team):
statUserAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'
statHeaders = {'User-Agent': statUserAgent}
statRequest = Request('https://nba.hupu.com/schedule/' + team, headers=statHeaders)
statResponse = urlopen(statRequest, timeout = 10)
statHtml = statResponse.read()
#Load Game information using regular expression
statPattern = re.compile(
'''<tr.*?<a.*?>(.*?)</a>.*?>(.*?)</a>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?</tr>''',
re.S)
#store all the records that corresponds to the RE rule
statDataSet = re.findall(statPattern, statHtml)
return statDataSet
class Record:
# initialization
def __init__(self, team):
self.team = team #Name of the team in Chinese
self.num = 0 #Number of the game
self.place = '' #Home/Road
self.done = True #True is the game is finished, else False
self.result = '' #The result of the game in Chinese
self.opp = '' #Opppnent of the game in Chinese
self.score = '' #The score of 2 teams in string like " XX:XX"
self.scoreSelf = '' #The score of team
self.scoreOpp = '' #The score of opponent
self.scoreDiff = 0 #The difference in scores (positive if win/negative if lose)
self.dateTime = '' #The date and time of this game (Beijing Time) in string
self.date = '' #The date of this game
self.time = '' #The time of this game
# Load seperated data from a list generated by regular expression decomposing
def Load(self, statRecord, count):
#Get the number of this game record
self.num = count
#if this games is unfinished
if statRecord[3].strip() == '-':
self.done = False
#To find out if it is a Home Game or a Road Game
if statRecord[0] == self.team:
self.place = '客'
self.opp = statRecord[1]
else:
self.place = '主'
self.opp = statRecord[0]
#if the game is finished
else:
#Get the result of this game
if statRecord[3].strip() == '胜':
self.result = '胜'
else:
self.result = '负'
if statRecord[0] == self.team:
self.place = '客'
self.opp = statRecord[1]
#Get the score of this game
self.scoreSelf = re.findall(r'^\d+', statRecord[2].strip())[0]
self.scoreOpp = re.findall(r'\d+$', statRecord[2].strip())[0]
self.score = self.scoreSelf + '-' + self.scoreOpp
else:
self.place = '主'
self.opp = statRecord[0]
self.scoreSelf = re.findall(r'\d+$', statRecord[2].strip())[0]
self.scoreOpp = re.findall(r'^\d+', statRecord[2].strip())[0]
self.score = self.scoreOpp + '-' + self.scoreSelf
self.scoreDiff = eval(self.scoreSelf) - eval(self.scoreOpp)
#Get the date and time of this game
self.dateTime = statRecord[4]
self.date = self.dateTime.split()[0]
self.time = self.dateTime.split()[1]
# Print game message
def Print(self):
#Trick here to fill in suitable spaces to align Chinese
spaceExtraSelf = ' '
spaceExtraOpp = ' '
if len(self.team) == 9: spaceExtraSelf = ' '
if len(self.team) == 5: spaceExtraSelf = ' '
if len(self.opp) == 9: spaceExtraOpp = ' '
if len(self.opp) == 5: spaceExtraOpp = ' '
if self.done == True:
if self.place == '客':
print ('Game %2s %s%10s VS %-10s%s %3s : %3s %+4d %s' % (
self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.scoreSelf, self.scoreOpp,
self.scoreDiff, self.dateTime))
if self.place == '主':
print ('Game %2s %s%10s VS %-10s%s %3s : %3s %+4d %s' % (
self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.scoreOpp, self.scoreSelf,
self.scoreDiff, self.dateTime))
else:
if self.place == '客':
print ('Game %2s %s%10s VS %-10s%s %s' % (
self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.dateTime))
if self.place == '主':
print ('Game %2s %s%10s VS %-10s%s %s' % (
self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.dateTime))
if __name__ == "__main__":
#Dictionary of team's English and Chinese names
#We need english names to implement url and Chinese name to print in Console
teams = {'spurs': '马刺', 'rockets': '火箭', 'grizzlies': '灰熊', 'pelicans':'鹈鹕', 'mavericks':'小牛',
'warriors': '勇士', 'clippers':'快船', 'kings': '国王', 'lakers': '湖人', 'suns': '太阳',
'jazz': '爵士', 'thunder': '雷霆', 'blazers': '开拓者', 'nuggets': '掘金', 'timberwolves': '森林狼',
'celtics': '凯尔特人', 'raptors': '猛龙', 'knicks': '尼克斯', '76ers': '76人', 'nets': '篮网',
'wizards': '奇才', 'hawks': '老鹰', 'heat': '热火', 'hornets': '黄蜂', 'magic': '魔术',
'cavaliers': '骑士', 'bucks':'雄鹿', 'bulls': '公牛', 'pacers': '步行者', 'pistons': '活塞'}
for team in teams:
#Comment this if and unindent codes below to get all 30 team's results
if team == 'rockets':
statDataSet = getDataSet(team)
countGame = 0
countWin = 0
countLose = 0
streak = ''
streakCount = 0
results = []
#Count Wins and Loses and print every record
for statRecord in statDataSet:
countGame += 1
record = Record(teams[team])
record.Load(statRecord, countGame)
if record.done == True:
results.append(record.scoreDiff)
if record.result == '胜':
countWin += 1
else:
countLose += 1
record.Print()
#Reverse the records to check the Win/Lose streak
statDataSet.reverse()
for statRecord in statDataSet:
record = Record(teams[team])
record.Load(statRecord, countGame)
if streak == '':
streak = record.result
streakCount = 1
continue
if record.result == streak:
streakCount += 1
else:
break
#Plot results one by one
x = range(0, len(results))
figure = plt.figure()
        plt.plot(x, results, 'r-', alpha = 0.6, label = 'line')
        plt.plot(x, results, 'ro', label = 'dot')
plt.title(team.upper() + ' +/- Overall' )
plt.xlabel('Game NO.')
plt.ylabel('+/-')
plt.grid(True)
figure.set_size_inches(12,4)
plt.legend(loc = 'upper right')
plt.show()
print('Total : %d W / %d L %d 连%s中' % (countWin, countLose, streakCount, streak))
|
NickyChan/NBASchedule
|
main.py
|
Python
|
gpl-3.0
| 7,776 | 0.010973 |
import threading
import upnp
import nupnp
class DiscoveryThread(threading.Thread):
def __init__(self, bridges):
super(DiscoveryThread, self).__init__()
self.bridges = bridges
self.upnp_thread = upnp.UPnPDiscoveryThread(self.bridges)
self.nupnp_thread = nupnp.NUPnPDiscoveryThread(self.bridges)
def run(self):
self.upnp_thread.start()
self.nupnp_thread.start()
self.upnp_thread.join()
self.nupnp_thread.join()
def discover():
bridges = set()
discovery_thread = DiscoveryThread(bridges)
discovery_thread.start()
discovery_thread.join()
return bridges
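# A minimal usage sketch (illustrative only): discover() blocks until the UPnP
# and N-UPnP discovery threads have both finished, then returns the combined set.
#
#     >>> found = discover()
#     >>> for bridge in found:
#     ...     print(bridge)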
|
mpolednik/reddit-button-hue
|
app/discovery/bridges.py
|
Python
|
mit
| 649 | 0 |
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
__all__ = ()
from .nonlocalmeans_functionals import *
__all__ += nonlocalmeans_functionals.__all__
|
kohr-h/odl
|
odl/contrib/solvers/functional/__init__.py
|
Python
|
mpl-2.0
| 416 | 0 |
__author__ = 'Ahmed Hani Ibrahim'
from LearningAlgorithm import *
class RPROP(LearningAlgorithm):
def learn(self, learningRate, input, output, network):
"""
:param learningRate:
:param input:
:param output:
:param network:
:return:
"""
pass
|
AhmedHani/Python-Neural-Networks-API
|
OptimizationAlgorithms/RPROP.py
|
Python
|
mit
| 310 | 0.003226 |
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
from datetime import datetime, timedelta
from babel.numbers import format_number
import json
import urllib
from pylons import c, g, request
from pylons.i18n import _, N_
from r2.controllers.api import ApiController
from r2.controllers.listingcontroller import ListingController
from r2.controllers.reddit_base import RedditController
from r2.lib import cssfilter, inventory, promote
from r2.lib.authorize import get_account_info, edit_profile, PROFILE_LIMIT
from r2.lib.db import queries
from r2.lib.errors import errors
from r2.lib.media import force_thumbnail, thumbnail_url
from r2.lib.memoize import memoize
from r2.lib.menus import NamedButton, NavButton, NavMenu
from r2.lib.pages import (
LinkInfoPage,
PaymentForm,
PromoteInventory,
PromotePage,
PromoteLinkForm,
PromoteLinkNew,
PromoteReport,
Reddit,
RefundPage,
RenderableCampaign,
Roadblocks,
UploadedImage,
)
from r2.lib.pages.things import wrap_links
from r2.lib.system_messages import user_added_messages
from r2.lib.utils import make_offset_date, to_date, to36
from r2.lib.validator import (
json_validate,
nop,
noresponse,
VAccountByName,
ValidAddress,
validate,
validatedForm,
ValidCard,
ValidIP,
VBid,
VBoolean,
VByName,
VDate,
VDateRange,
VExistingUname,
VFloat,
VImageType,
VInt,
VLength,
VLink,
VLocation,
VModhash,
VOneOf,
VPriority,
VPromoCampaign,
VRatelimit,
VSelfText,
VShamedDomain,
VSponsor,
VSponsorAdmin,
VSponsorAdminOrAdminSecret,
VSubmitSR,
VTitle,
VUploadLength,
VUrl,
)
from r2.models import (
Account,
calc_impressions,
Frontpage,
get_promote_srid,
Link,
Message,
NotFound,
PromoCampaign,
PromotionLog,
PromotionWeights,
PromotedLinkRoadblock,
Subreddit,
)
def campaign_has_oversold_error(form, campaign):
if campaign.priority.inventory_override:
return
target = Subreddit._by_name(campaign.sr_name) if campaign.sr_name else None
return has_oversold_error(form, campaign, campaign.start_date,
campaign.end_date, campaign.bid, campaign.cpm,
target, campaign.location)
def has_oversold_error(form, campaign, start, end, bid, cpm, target, location):
ndays = (to_date(end) - to_date(start)).days
total_request = calc_impressions(bid, cpm)
daily_request = int(total_request / ndays)
oversold = inventory.get_oversold(target or Frontpage, start, end,
daily_request, ignore=campaign,
location=location)
if oversold:
min_daily = min(oversold.values())
available = min_daily * ndays
msg_params = {
'available': format_number(available, locale=c.locale),
'target': target.name if target else 'the frontpage',
'start': start.strftime('%m/%d/%Y'),
'end': end.strftime('%m/%d/%Y'),
}
c.errors.add(errors.OVERSOLD_DETAIL, field='bid',
msg_params=msg_params)
form.has_errors('bid', errors.OVERSOLD_DETAIL)
return True
class PromoteController(RedditController):
@validate(VSponsor())
def GET_new_promo(self):
return PromotePage(title=_("create sponsored link"),
content=PromoteLinkNew()).render()
@validate(VSponsor('link'),
link=VLink('link'))
def GET_edit_promo(self, link):
if not link or link.promoted is None:
return self.abort404()
rendered = wrap_links(link, skip=False)
form = PromoteLinkForm(link, rendered)
page = Reddit(title=_("edit sponsored link"), content=form,
show_sidebar=False, extension_handling=False)
return page.render()
# admin only because the route might change
@validate(VSponsorAdmin('campaign'),
campaign=VPromoCampaign('campaign'))
def GET_edit_promo_campaign(self, campaign):
if not campaign:
return self.abort404()
link = Link._byID(campaign.link_id)
return self.redirect(promote.promo_edit_url(link))
@validate(VSponsorAdmin(),
link=VLink("link"),
campaign=VPromoCampaign("campaign"))
def GET_refund(self, link, campaign):
if campaign.link_id != link._id:
return self.abort404()
content = RefundPage(link, campaign)
return Reddit("refund", content=content, show_sidebar=False).render()
@validate(VSponsorAdmin())
def GET_roadblock(self):
return PromotePage(title=_("manage roadblocks"),
content=Roadblocks()).render()
@validate(VSponsor("link"),
link=VLink("link"),
campaign=VPromoCampaign("campaign"))
def GET_pay(self, link, campaign):
# no need for admins to play in the credit card area
if c.user_is_loggedin and c.user._id != link.author_id:
return self.abort404()
if not campaign.link_id == link._id:
return self.abort404()
if g.authorizenetapi:
data = get_account_info(c.user)
content = PaymentForm(link, campaign,
customer_id=data.customerProfileId,
profiles=data.paymentProfiles,
max_profiles=PROFILE_LIMIT)
else:
content = None
res = LinkInfoPage(link=link,
content=content,
show_sidebar=False)
return res.render()
@validate(VSponsorAdminOrAdminSecret('secret'),
start=VDate('startdate'),
end=VDate('enddate'),
link_text=nop('link_text'),
owner=VAccountByName('owner'))
def GET_report(self, start, end, link_text=None, owner=None):
now = datetime.now(g.tz).replace(hour=0, minute=0, second=0,
microsecond=0)
end = end or now - timedelta(days=1)
start = start or end - timedelta(days=7)
links = []
bad_links = []
owner_name = owner.name if owner else ''
if owner:
promo_weights = PromotionWeights.get_campaigns(start, end,
author_id=owner._id)
campaign_ids = [pw.promo_idx for pw in promo_weights]
campaigns = PromoCampaign._byID(campaign_ids, data=True)
link_ids = {camp.link_id for camp in campaigns.itervalues()}
links.extend(Link._byID(link_ids, data=True, return_dict=False))
if link_text is not None:
id36s = link_text.replace(',', ' ').split()
try:
links_from_text = Link._byID36(id36s, data=True)
except NotFound:
links_from_text = {}
bad_links = [id36 for id36 in id36s if id36 not in links_from_text]
links.extend(links_from_text.values())
content = PromoteReport(links, link_text, owner_name, bad_links, start,
end)
if c.render_style == 'csv':
return content.as_csv()
else:
return PromotePage(title=_("sponsored link report"),
content=content).render()
@validate(
VSponsorAdmin(),
start=VDate('startdate', reference_date=promote.promo_datetime_now),
end=VDate('enddate', reference_date=promote.promo_datetime_now),
sr_name=nop('sr_name'),
)
def GET_promote_inventory(self, start, end, sr_name):
if not start or not end:
start = promote.promo_datetime_now(offset=1).date()
end = promote.promo_datetime_now(offset=8).date()
c.errors.remove((errors.BAD_DATE, 'startdate'))
c.errors.remove((errors.BAD_DATE, 'enddate'))
sr = Frontpage
if sr_name:
try:
sr = Subreddit._by_name(sr_name)
except NotFound:
c.errors.add(errors.SUBREDDIT_NOEXIST, field='sr_name')
content = PromoteInventory(start, end, sr)
return PromotePage(title=_("sponsored link inventory"),
content=content).render()
class PromoteListingController(ListingController):
where = 'promoted'
render_cls = PromotePage
titles = {
'future_promos': N_('unapproved promoted links'),
'pending_promos': N_('accepted promoted links'),
'unpaid_promos': N_('unpaid promoted links'),
'rejected_promos': N_('rejected promoted links'),
'live_promos': N_('live promoted links'),
'underdelivered': N_('underdelivered promoted links'),
'reported': N_('reported promoted links'),
'house': N_('house promoted links'),
'all': N_('all promoted links'),
}
def title(self):
return _(self.titles[self.sort])
@property
def title_text(self):
return _('promoted by you')
@classmethod
@memoize('live_by_subreddit', time=300)
def _live_by_subreddit(cls, sr_names):
promotuples = promote.get_live_promotions(sr_names)
return [pt.link for pt in promotuples]
    @classmethod
    def live_by_subreddit(cls, sr):
sr_names = [''] if sr == Frontpage else [sr.name]
return cls._live_by_subreddit(sr_names)
@classmethod
@memoize('house_link_names', time=60)
def get_house_link_names(cls):
now = promote.promo_datetime_now()
pws = PromotionWeights.get_campaigns(now)
campaign_ids = {pw.promo_idx for pw in pws}
q = PromoCampaign._query(PromoCampaign.c._id.in_(campaign_ids),
PromoCampaign.c.priority_name == 'house',
data=True)
return [Link._fullname_from_id36(to36(camp.link_id)) for camp in q]
@property
def menus(self):
filters = [
NamedButton('all_promos', dest=''),
NamedButton('future_promos'),
NamedButton('unpaid_promos'),
NamedButton('rejected_promos'),
NamedButton('pending_promos'),
NamedButton('live_promos'),
]
menus = [NavMenu(filters, base_path='/promoted', title='show',
type='lightdrop')]
if self.sort == 'live_promos' and c.user_is_sponsor:
srnames = promote.all_live_promo_srnames()
buttons = [NavButton('all', '')]
try:
srnames.remove('')
frontbutton = NavButton('FRONTPAGE', Frontpage.name,
aliases=['/promoted/live_promos/%s' %
urllib.quote(Frontpage.name)])
buttons.append(frontbutton)
except KeyError:
pass
srnames = sorted(srnames, key=lambda name: name.lower())
buttons.extend([NavButton(name, name) for name in srnames])
menus.append(NavMenu(buttons, base_path='/promoted/live_promos',
title='subreddit', type='lightdrop'))
return menus
def keep_fn(self):
def keep(item):
if self.sort == "future_promos":
# this sort is used to review links that need to be approved
# skip links that don't have any paid campaigns
campaigns = list(PromoCampaign._by_link(item._id))
if not any(promote.authed_or_not_needed(camp)
for camp in campaigns):
return False
if item.promoted and not item._deleted:
return True
else:
return False
return keep
def query(self):
if c.user_is_sponsor:
if self.sort == "future_promos":
return queries.get_all_unapproved_links()
elif self.sort == "pending_promos":
return queries.get_all_accepted_links()
elif self.sort == "unpaid_promos":
return queries.get_all_unpaid_links()
elif self.sort == "rejected_promos":
return queries.get_all_rejected_links()
elif self.sort == "live_promos" and self.sr:
return self.live_by_subreddit(self.sr)
elif self.sort == 'live_promos':
return queries.get_all_live_links()
elif self.sort == 'underdelivered':
q = queries.get_underdelivered_campaigns()
campaigns = PromoCampaign._by_fullname(list(q), data=True,
return_dict=False)
link_ids = [camp.link_id for camp in campaigns]
return [Link._fullname_from_id36(to36(id)) for id in link_ids]
elif self.sort == 'reported':
return queries.get_reported_links(get_promote_srid())
elif self.sort == 'house':
return self.get_house_link_names()
elif self.sort == 'all':
return queries.get_all_promoted_links()
else:
if self.sort == "future_promos":
return queries.get_unapproved_links(c.user._id)
elif self.sort == "pending_promos":
return queries.get_accepted_links(c.user._id)
elif self.sort == "unpaid_promos":
return queries.get_unpaid_links(c.user._id)
elif self.sort == "rejected_promos":
return queries.get_rejected_links(c.user._id)
elif self.sort == "live_promos":
return queries.get_live_links(c.user._id)
elif self.sort == "all":
return queries.get_promoted_links(c.user._id)
@validate(VSponsor(),
sr=nop('sr'))
def GET_listing(self, sr=None, sort="all", **env):
if not c.user_is_loggedin or not c.user.email_verified:
# never reached--see MinimalController.on_validation_error
return self.redirect("/ad_inq")
self.sort = sort
self.sr = None
if sr and sr == Frontpage.name:
self.sr = Frontpage
elif sr:
try:
self.sr = Subreddit._by_name(sr)
except NotFound:
pass
return ListingController.GET_listing(self, **env)
class PromoteApiController(ApiController):
@json_validate(sr=VSubmitSR('sr', promotion=True),
location=VLocation(),
start=VDate('startdate'),
end=VDate('enddate'))
def GET_check_inventory(self, responder, sr, location, start, end):
sr = sr or Frontpage
if not location or not location.country:
available = inventory.get_available_pageviews(sr, start, end,
datestr=True)
else:
available = inventory.get_available_pageviews_geotargeted(sr,
location, start, end, datestr=True)
return {'inventory': available}
@validatedForm(VSponsorAdmin(),
VModhash(),
link=VLink("link_id36"),
campaign=VPromoCampaign("campaign_id36"))
def POST_freebie(self, form, jquery, link, campaign):
if campaign_has_oversold_error(form, campaign):
form.set_html(".freebie", "target oversold, can't freebie")
return
if promote.is_promo(link) and campaign:
promote.free_campaign(link, campaign, c.user)
form.redirect(promote.promo_edit_url(link))
@validatedForm(VSponsorAdmin(),
VModhash(),
link=VByName("link"),
note=nop("note"))
def POST_promote_note(self, form, jquery, link, note):
if promote.is_promo(link):
text = PromotionLog.add(link, note)
form.find(".notes").children(":last").after(
"<p>" + text + "</p>")
@noresponse(VSponsorAdmin(),
VModhash(),
thing=VByName('id'))
def POST_promote(self, thing):
if promote.is_promo(thing):
promote.accept_promotion(thing)
@noresponse(VSponsorAdmin(),
VModhash(),
thing=VByName('id'),
reason=nop("reason"))
def POST_unpromote(self, thing, reason):
if promote.is_promo(thing):
promote.reject_promotion(thing, reason=reason)
@validatedForm(VSponsorAdmin(),
VModhash(),
link=VLink('link'),
campaign=VPromoCampaign('campaign'))
def POST_refund_campaign(self, form, jquery, link, campaign):
billable_impressions = promote.get_billable_impressions(campaign)
billable_amount = promote.get_billable_amount(campaign,
billable_impressions)
refund_amount = promote.get_refund_amount(campaign, billable_amount)
if refund_amount > 0:
promote.refund_campaign(link, campaign, billable_amount,
billable_impressions)
form.set_html('.status', _('refund succeeded'))
else:
form.set_html('.status', _('refund not needed'))
@validatedForm(VSponsor('link_id36'),
VModhash(),
VRatelimit(rate_user=True,
rate_ip=True,
prefix='create_promo_'),
VShamedDomain('url'),
username=VLength('username', 100, empty_error=None),
l=VLink('link_id36'),
title=VTitle('title'),
url=VUrl('url', allow_self=False),
selftext=VSelfText('text'),
kind=VOneOf('kind', ['link', 'self']),
ip=ValidIP(),
disable_comments=VBoolean("disable_comments"),
sendreplies=VBoolean("sendreplies"),
media_width=VInt("media-width", min=0),
media_height=VInt("media-height", min=0),
media_embed=VLength("media-embed", 1000),
media_override=VBoolean("media-override"),
domain_override=VLength("domain", 100)
)
def POST_edit_promo(self, form, jquery, ip, username, l, title, url,
selftext, kind, disable_comments, sendreplies, media_height,
media_width, media_embed, media_override, domain_override):
should_ratelimit = False
if not c.user_is_sponsor:
should_ratelimit = True
if not should_ratelimit:
c.errors.remove((errors.RATELIMIT, 'ratelimit'))
# check for user override
if not l and c.user_is_sponsor and username:
try:
user = Account._by_name(username)
except NotFound:
c.errors.add(errors.USER_DOESNT_EXIST, field="username")
form.set_error(errors.USER_DOESNT_EXIST, "username")
return
if not user.email:
c.errors.add(errors.NO_EMAIL_FOR_USER, field="username")
form.set_error(errors.NO_EMAIL_FOR_USER, "username")
return
if not user.email_verified:
c.errors.add(errors.NO_VERIFIED_EMAIL, field="username")
form.set_error(errors.NO_VERIFIED_EMAIL, "username")
return
else:
user = c.user
# check for shame banned domains
if form.has_errors("url", errors.DOMAIN_BANNED):
g.stats.simple_event('spam.shame.link')
return
# demangle URL in canonical way
if url:
if isinstance(url, (unicode, str)):
form.set_inputs(url=url)
elif isinstance(url, tuple) or isinstance(url[0], Link):
# there's already one or more links with this URL, but
                # we're allowing multiple submissions, so we really just
# want the URL
url = url[0].url
if kind == 'link':
if form.has_errors('url', errors.NO_URL, errors.BAD_URL):
return
# users can change the disable_comments on promoted links
if ((not l or not promote.is_promoted(l)) and
(form.has_errors('title', errors.NO_TEXT, errors.TOO_LONG) or
jquery.has_errors('ratelimit', errors.RATELIMIT))):
return
if not l:
l = promote.new_promotion(title, url if kind == 'link' else 'self',
selftext if kind == 'self' else '',
user, ip)
elif promote.is_promo(l):
# changing link type is not allowed
if ((l.is_self and kind == 'link') or
(not l.is_self and kind == 'self')):
c.errors.add(errors.NO_CHANGE_KIND, field="kind")
form.set_error(errors.NO_CHANGE_KIND, "kind")
return
changed = False
# live items can only be changed by a sponsor, and also
# pay the cost of de-approving the link
trusted = c.user_is_sponsor or c.user.trusted_sponsor
if not promote.is_promoted(l) or trusted:
if title and title != l.title:
l.title = title
changed = not trusted
if kind == 'link' and url and url != l.url:
l.url = url
changed = not trusted
# only trips if the title and url are changed by a non-sponsor
if changed:
promote.unapprove_promotion(l)
# selftext can be changed at any time
if kind == 'self':
l.selftext = selftext
        # comment disabling and sendreplies are free to be changed at any time.
l.disable_comments = disable_comments
l.sendreplies = sendreplies
if c.user_is_sponsor or c.user.trusted_sponsor:
if media_embed and media_width and media_height:
l.media_object = dict(height=media_height,
width=media_width,
content=media_embed,
type='custom')
else:
l.media_object = None
l.media_override = media_override
if getattr(l, "domain_override", False) or domain_override:
l.domain_override = domain_override
l._commit()
form.redirect(promote.promo_edit_url(l))
@validatedForm(VSponsorAdmin(),
VModhash(),
dates=VDateRange(['startdate', 'enddate'],
reference_date=promote.promo_datetime_now),
sr=VSubmitSR('sr', promotion=True))
def POST_add_roadblock(self, form, jquery, dates, sr):
if (form.has_errors('startdate', errors.BAD_DATE) or
form.has_errors('enddate', errors.BAD_DATE, errors.BAD_DATE_RANGE)):
return
if form.has_errors('sr', errors.SUBREDDIT_NOEXIST,
errors.SUBREDDIT_NOTALLOWED,
errors.SUBREDDIT_REQUIRED):
return
if dates and sr:
sd, ed = dates
PromotedLinkRoadblock.add(sr, sd, ed)
jquery.refresh()
@validatedForm(VSponsorAdmin(),
VModhash(),
dates=VDateRange(['startdate', 'enddate'],
reference_date=promote.promo_datetime_now),
sr=VSubmitSR('sr', promotion=True))
def POST_rm_roadblock(self, form, jquery, dates, sr):
if dates and sr:
sd, ed = dates
PromotedLinkRoadblock.remove(sr, sd, ed)
jquery.refresh()
@validatedForm(VSponsor('link_id36'),
VModhash(),
dates=VDateRange(['startdate', 'enddate'],
earliest=timedelta(days=g.min_promote_future),
latest=timedelta(days=g.max_promote_future),
reference_date=promote.promo_datetime_now,
business_days=True,
sponsor_override=True),
link=VLink('link_id36'),
bid=VBid('bid', min=0, max=g.max_promote_bid,
coerce=False, error=errors.BAD_BID),
sr=VSubmitSR('sr', promotion=True),
campaign_id36=nop("campaign_id36"),
targeting=VLength("targeting", 10),
priority=VPriority("priority"),
location=VLocation())
def POST_edit_campaign(self, form, jquery, link, campaign_id36,
dates, bid, sr, targeting, priority, location):
if not link:
return
start, end = dates or (None, None)
author = Account._byID(link.author_id, data=True)
cpm = author.cpm_selfserve_pennies
if location:
cpm += g.cpm_selfserve_geotarget.pennies
if (form.has_errors('startdate', errors.BAD_DATE,
errors.DATE_TOO_EARLY, errors.DATE_TOO_LATE) or
form.has_errors('enddate', errors.BAD_DATE, errors.DATE_TOO_EARLY,
errors.DATE_TOO_LATE, errors.BAD_DATE_RANGE)):
return
# Limit the number of PromoCampaigns a Link can have
# Note that the front end should prevent the user from getting
# this far
existing_campaigns = list(PromoCampaign._by_link(link._id))
if len(existing_campaigns) > g.MAX_CAMPAIGNS_PER_LINK:
c.errors.add(errors.TOO_MANY_CAMPAIGNS,
msg_params={'count': g.MAX_CAMPAIGNS_PER_LINK},
field='title')
form.has_errors('title', errors.TOO_MANY_CAMPAIGNS)
return
campaign = None
if campaign_id36:
try:
campaign = PromoCampaign._byID36(campaign_id36)
except NotFound:
pass
if priority.cpm:
if form.has_errors('bid', errors.BAD_BID):
return
# you cannot edit the bid of a live ad unless it's a freebie
if (campaign and bid != campaign.bid and
promote.is_live_promo(link, campaign) and
not campaign.is_freebie()):
c.errors.add(errors.BID_LIVE, field='bid')
form.has_errors('bid', errors.BID_LIVE)
return
min_bid = 0 if c.user_is_sponsor else g.min_promote_bid
if bid is None or bid < min_bid:
c.errors.add(errors.BAD_BID, field='bid',
msg_params={'min': min_bid,
'max': g.max_promote_bid})
form.has_errors('bid', errors.BAD_BID)
return
else:
bid = 0. # Set bid to 0 as dummy value
if targeting == 'one':
if form.has_errors('sr', errors.SUBREDDIT_NOEXIST,
errors.SUBREDDIT_NOTALLOWED,
errors.SUBREDDIT_REQUIRED):
# checking to get the error set in the form, but we can't
# check for rate-limiting if there's no subreddit
return
roadblock = PromotedLinkRoadblock.is_roadblocked(sr, start, end)
if roadblock and not c.user_is_sponsor:
msg_params = {"start": roadblock[0].strftime('%m/%d/%Y'),
"end": roadblock[1].strftime('%m/%d/%Y')}
c.errors.add(errors.OVERSOLD, field='sr',
msg_params=msg_params)
form.has_errors('sr', errors.OVERSOLD)
return
elif targeting == 'none':
sr = None
# Check inventory
campaign = campaign if campaign_id36 else None
if not priority.inventory_override:
oversold = has_oversold_error(form, campaign, start, end, bid, cpm,
sr, location)
if oversold:
return
if campaign:
promote.edit_campaign(link, campaign, dates, bid, cpm, sr, priority,
location)
else:
campaign = promote.new_campaign(link, dates, bid, cpm, sr, priority,
location)
rc = RenderableCampaign.from_campaigns(link, campaign)
jquery.update_campaign(campaign._fullname, rc.render_html())
@validatedForm(VSponsor('link_id36'),
VModhash(),
l=VLink('link_id36'),
campaign=VPromoCampaign("campaign_id36"))
def POST_delete_campaign(self, form, jquery, l, campaign):
if l and campaign:
promote.delete_campaign(l, campaign)
@validatedForm(VSponsorAdmin(),
VModhash(),
link=VLink('link_id36'),
campaign=VPromoCampaign("campaign_id36"))
def POST_terminate_campaign(self, form, jquery, link, campaign):
if link and campaign:
promote.terminate_campaign(link, campaign)
rc = RenderableCampaign.from_campaigns(link, campaign)
jquery.update_campaign(campaign._fullname, rc.render_html())
@validatedForm(VSponsor('link'),
VModhash(),
link=VByName("link"),
campaign=VPromoCampaign("campaign"),
customer_id=VInt("customer_id", min=0),
pay_id=VInt("account", min=0),
edit=VBoolean("edit"),
address=ValidAddress(
["firstName", "lastName", "company", "address",
"city", "state", "zip", "country", "phoneNumber"]),
creditcard=ValidCard(["cardNumber", "expirationDate",
"cardCode"]))
def POST_update_pay(self, form, jquery, link, campaign, customer_id, pay_id,
edit, address, creditcard):
# Check inventory
if campaign_has_oversold_error(form, campaign):
return
address_modified = not pay_id or edit
form_has_errors = False
if address_modified:
if (form.has_errors(["firstName", "lastName", "company", "address",
"city", "state", "zip",
"country", "phoneNumber"],
errors.BAD_ADDRESS) or
form.has_errors(["cardNumber", "expirationDate", "cardCode"],
errors.BAD_CARD)):
form_has_errors = True
elif g.authorizenetapi:
pay_id = edit_profile(c.user, address, creditcard, pay_id)
else:
pay_id = 1
# if link is in use or finished, don't make a change
if pay_id and not form_has_errors:
# valid bid and created or existing bid id.
# check if already a transaction
if g.authorizenetapi:
success, reason = promote.auth_campaign(link, campaign, c.user,
pay_id)
else:
success = True
if success:
form.redirect(promote.promo_edit_url(link))
else:
form.set_html(".status",
reason or
_("failed to authenticate card. sorry."))
@validate(VSponsor("link_name"),
VModhash(),
link=VByName('link_name'),
file=VUploadLength('file', 500*1024),
img_type=VImageType('img_type'))
def POST_link_thumb(self, link=None, file=None, img_type='jpg'):
if link and (not promote.is_promoted(link) or
c.user_is_sponsor or c.user.trusted_sponsor):
errors = dict(BAD_CSS_NAME="", IMAGE_ERROR="")
try:
                # thumbnails for promoted links can change and therefore expire
force_thumbnail(link, file, file_type=".%s" % img_type)
except cssfilter.BadImage:
# if the image doesn't clean up nicely, abort
errors["IMAGE_ERROR"] = _("bad image")
if any(errors.values()):
return UploadedImage("", "", "upload", errors=errors,
form_id="image-upload").render()
else:
link._commit()
return UploadedImage(_('saved'), thumbnail_url(link), "",
errors=errors,
form_id="image-upload").render()
|
h2oloopan/easymerge
|
EasyMerge/tests/reddit/r2/r2/controllers/promotecontroller.py
|
Python
|
mit
| 34,376 | 0.000698 |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'mtr.utils.views',
url(r'^model/(?P<name>.+)/pk/(?P<pk>\d+)$',
'model_label', name='model_label')
)
|
mtrgroup/django-mtr-utils
|
mtr/utils/urls.py
|
Python
|
mit
| 186 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Exercise submission check script
To run it, from the shell:
$ python check.py login_github
"""
import os
import random
import sys
ejercicio = 'X-Serv-14.5-Sumador-Simple'
student_files = [
'servidor-sumador.py'
]
repo_files = [
'check.py',
'README.md',
'LICENSE',
'.gitignore',
'.git'
]
files = student_files + repo_files
if len(sys.argv) != 2:
print
sys.exit("Usage: $ python check.py login_github")
repo_git = "http://github.com/" + sys.argv[1] + "/" + ejercicio
aleatorio = str(int(random.random() * 1000000))
error = 0
print
print "Clonando el repositorio " + repo_git + "\n"
os.system('git clone ' + repo_git + ' /tmp/' + aleatorio + ' > /dev/null 2>&1')
try:
github_file_list = os.listdir('/tmp/' + aleatorio)
except OSError:
error = 1
print "Error: No se ha podido acceder al repositorio " + repo_git + "."
print
sys.exit()
if len(github_file_list) != len(files):
error = 1
print "Error: número de ficheros en el repositorio incorrecto"
for filename in files:
if filename not in github_file_list:
error = 1
print "\tError: " + filename + " no encontrado en el repositorio."
if not error:
print "Parece que la entrega se ha realizado bien."
print
print "La salida de pep8 es: (si todo va bien, no ha de mostrar nada)"
print
for filename in student_files:
if filename in github_file_list:
os.system('pep8 --repeat --show-source --statistics /tmp/'
+ aleatorio + '/' + filename)
else:
print "Fichero " + filename + " no encontrado en el repositorio."
print
|
begea/X-Serv-14.5-Sumador-Simple
|
check.py
|
Python
|
gpl-3.0
| 1,680 | 0 |
import os
import re
import sys
from setuptools import setup, find_packages
PY3 = sys.version_info[0] == 3
here = os.path.abspath(os.path.dirname(__file__))
name = 'pyramid_kvs'
with open(os.path.join(here, 'README.rst')) as readme:
README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
CHANGES = changes.read()
with open(os.path.join(here, name, '__init__.py')) as v_file:
version = re.compile(r".*__version__ = '(.*?)'",
re.S).match(v_file.read()).group(1)
requires = ['pyramid', 'redis >= 3.0']
if PY3:
requires.append('python3-memcached')
else:
requires.append('python-memcached')
tests_require = ['nose', 'coverage']
if sys.version_info < (2, 7):
tests_require += ['unittest2']
extras_require = {'test': tests_require}
setup(name=name.replace('_', '-'),
version=version,
description='Session and cache for Pyramid',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
author='Gandi',
author_email='feedback@gandi.net',
url='https://github.com/Gandi/pyramid_kvs',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='{0}.tests'.format(name),
install_requires=requires,
tests_require=tests_require,
extras_require=extras_require
)
|
Gandi/pyramid_kvs
|
setup.py
|
Python
|
gpl-2.0
| 1,633 | 0.000612 |
from django.core.urlresolvers import reverse
from kishore.models import Artist, Song, Release
from base import KishoreTestCase
class ArtistTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_artists_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
a = Artist.objects.get(pk=1)
resp = self.client.get(a.get_absolute_url())
self.assertEqual(resp.status_code, 200)
class SongTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_songs_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
s = Song.objects.get(pk=1)
resp = self.client.get(s.get_absolute_url())
self.assertEqual(resp.status_code, 200)
def test_player_html(self):
with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
s = Song.objects.get(pk=1)
self.assertTrue(s.get_player_html())
# try non-streamable song
s = Song.objects.get(pk=2)
self.assertFalse(s.get_player_html())
def test_download_link(self):
s = Song.objects.get(pk=1)
self.assertTrue(s.download_link())
# try non-downloadable song
s = Song.objects.get(pk=2)
self.assertFalse(s.download_link())
class ReleaseTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_releases_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
r = Release.objects.get(pk=1)
resp = self.client.get(r.get_absolute_url())
self.assertEqual(resp.status_code, 200)
def test_player_html(self):
with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
r = Release.objects.get(pk=1)
self.assertTrue(r.get_player_html())
# try non-streamable
r = Release.objects.get(pk=2)
self.assertFalse(r.get_player_html())
|
udbhav/kishore
|
kishore/tests/music.py
|
Python
|
mit
| 2,056 | 0.002432 |
"""
LMS specific monitoring helpers.
"""
|
msegado/edx-platform
|
lms/djangoapps/monitoring/__init__.py
|
Python
|
agpl-3.0
| 41 | 0 |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import os
import sys
# On Mac OS X tell enchant library where to look for enchant backends (aspell, myspell, ...).
# Enchant is looking for backends in directory 'PREFIX/lib/enchant'
# Note: env. var. ENCHANT_PREFIX_DIR is implemented only in the development version:
# https://github.com/AbiWord/enchant
# https://github.com/AbiWord/enchant/pull/2
# TODO Test this rthook.
if sys.platform.startswith('darwin'):
os.environ['ENCHANT_PREFIX_DIR'] = os.path.join(sys._MEIPASS, 'enchant')
|
etherkit/OpenBeacon2
|
macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/rthooks/pyi_rth_enchant.py
|
Python
|
gpl-3.0
| 968 | 0.004132 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.model.base_pytorch_model import PytorchModelBuilder
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.model.Seq2Seq_pytorch import model_creator
from .base_automodel import BasePytorchAutomodel
class AutoSeq2Seq(BasePytorchAutomodel):
def __init__(self,
input_feature_num,
output_target_num,
past_seq_len,
future_seq_len,
optimizer,
loss,
metric,
lr=0.001,
lstm_hidden_dim=128,
lstm_layer_num=2,
dropout=0.25,
teacher_forcing=False,
backend="torch",
logs_dir="/tmp/auto_seq2seq",
cpus_per_trial=1,
name="auto_seq2seq",
remote_dir=None,
):
"""
Create an AutoSeq2Seq.
:param input_feature_num: Int. The number of features in the input
:param output_target_num: Int. The number of targets in the output
:param past_seq_len: Int. The number of historical steps used for forecasting.
:param future_seq_len: Int. The number of future steps to forecast.
        :param optimizer: String or PyTorch optimizer creator function or
tf.keras optimizer instance.
:param loss: String or pytorch/tf.keras loss instance or pytorch loss creator function.
:param metric: String. The evaluation metric name to optimize. e.g. "mse"
:param lr: float or hp sampling function from a float space. Learning rate.
e.g. hp.choice([0.001, 0.003, 0.01])
:param lstm_hidden_dim: LSTM hidden channel for decoder and encoder.
            e.g. hp.grid_search([32, 64, 128])
:param lstm_layer_num: LSTM layer number for decoder and encoder.
e.g. hp.randint(1, 4)
        :param dropout: float or hp sampling function from a float space. Dropout
rate. e.g. hp.uniform(0.1, 0.3)
:param teacher_forcing: If use teacher forcing in training. e.g. hp.choice([True, False])
:param backend: The backend of the Seq2Seq model. We only support backend as "torch"
for now.
:param logs_dir: Local directory to save logs and results. It defaults to
"/tmp/auto_seq2seq"
:param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
:param name: name of the AutoSeq2Seq. It defaults to "auto_seq2seq"
:param remote_dir: String. Remote directory to sync training results and checkpoints. It
defaults to None and doesn't take effects while running in local. While running in
cluster, it defaults to "hdfs:///tmp/{name}".
"""
super().__init__()
# todo: support search for past_seq_len.
# todo: add input check.
if backend != "torch":
raise ValueError(f"We only support backend as torch. Got {backend}")
self.search_space = dict(
input_feature_num=input_feature_num,
output_feature_num=output_target_num,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len,
lstm_hidden_dim=lstm_hidden_dim,
lstm_layer_num=lstm_layer_num,
lr=lr,
dropout=dropout,
teacher_forcing=teacher_forcing
)
self.metric = metric
model_builder = PytorchModelBuilder(model_creator=model_creator,
optimizer_creator=optimizer,
loss_creator=loss,
)
self.auto_est = AutoEstimator(model_builder=model_builder,
logs_dir=logs_dir,
resources_per_trial={"cpu": cpus_per_trial},
remote_dir=remote_dir,
name=name)
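# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal sketch of constructing an AutoSeq2Seq with the hp sampling
# functions referenced in the docstring above, assuming zoo.orca.automl.hp
# is importable; the argument values are placeholders, not recommendations.
#
# from zoo.orca.automl import hp
# auto_s2s = AutoSeq2Seq(input_feature_num=4,
#                        output_target_num=1,
#                        past_seq_len=20,
#                        future_seq_len=5,
#                        optimizer="Adam",
#                        loss="mse",
#                        metric="mse",
#                        lr=hp.choice([0.001, 0.003, 0.01]),
#                        lstm_hidden_dim=hp.grid_search([32, 64, 128]),
#                        dropout=hp.uniform(0.1, 0.3),
#                        teacher_forcing=hp.choice([True, False]))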
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/chronos/autots/model/auto_seq2seq.py
|
Python
|
apache-2.0
| 4,638 | 0.002803 |
import netifaces
from netaddr import *
for inter in netifaces.interfaces():
addrs = netifaces.ifaddresses(inter)
try:
print(addrs)
print(addrs[netifaces.AF_INET][0]["addr"])
print(addrs[netifaces.AF_INET][0]["broadcast"])
print(addrs[netifaces.AF_INET][0]["netmask"])
local_ip = addrs[netifaces.AF_INET][0]["addr"]
broadcast = addrs[netifaces.AF_INET][0]["broadcast"]
netmask = addrs[netifaces.AF_INET][0]["netmask"]
mac = addrs[netifaces.AF_LINK][0]["addr"]
gws = netifaces.gateways()
gateway = gws['default'][netifaces.AF_INET][0]
interface = inter
ips = []
for ip in IPNetwork(broadcast + '/' + str(IPNetwork('0.0.0.0/' + netmask).prefixlen)).iter_hosts():
ips.append(str(ip))
except:
print("Error")
def get_lan_ip():
global local_ip
return local_ip
def get_broadcast_ip():
global broadcast
return broadcast
def get_all_ips():
global ips
return ips
def get_gateway():
global gateway
return gateway
def get_mac():
global mac
return mac
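# Illustrative note (added): the module resolves its network values at import
# time, so after "import Utils" the getters can simply be called, e.g.:
# print(Utils.get_lan_ip(), Utils.get_gateway(), len(Utils.get_all_ips()))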
|
GadgeurX/NetworkLiberator
|
Daemon/Utils.py
|
Python
|
gpl-3.0
| 1,119 | 0.007149 |
__author__ = 'sibirrer'
#this file is meant to be a shell script to be run on the Monch cluster
# set up the scene
from cosmoHammer.util.MpiUtil import MpiPool
import time
import sys
import pickle
import dill
start_time = time.time()
#path2load = '/mnt/lnec/sibirrer/input.txt'
path2load = str(sys.argv[1])
f = open(path2load, 'rb')
[lensDES, walkerRatio, n_burn, n_run, mean_start, sigma_start, lowerLimit, upperLimit, path2dump] = dill.load(f)
f.close()
end_time = time.time()
#print end_time - start_time, 'time used for initialisation'
# run the computation
from easylens.Fitting.mcmc import MCMC_sampler
sampler = MCMC_sampler(lensDES, fix_center=False)
samples = sampler.mcmc_CH(walkerRatio, n_run, n_burn, mean_start, sigma_start, lowerLimit, upperLimit, threadCount=1, init_pos=None, mpi_monch=True)
# save the output
pool = MpiPool(None)
if pool.isMaster():
f = open(path2dump, 'wb')
pickle.dump(samples, f)
f.close()
end_time = time.time()
print(end_time - start_time, 'total time needed for computation')
print('Result saved in:', path2dump)
print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
|
DES-SL/EasyLens
|
easylens/Scripts/des_script.py
|
Python
|
mit
| 1,168 | 0.006849 |
#! /usr/bin/env python
"""
This program plots the average electronic energy during a NAMD simulation,
averaged over several initial conditions.
It plots both the SH and SE population based energies.
Example:
plot_average_energy.py -p . -nstates 26 -nconds 6
Note that the number of states is the same as given in the pyxaid output.
It must include the ground state as well.
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
def plot_stuff(outs, pops):
"""
energies - a vector of energy values that can be plotted
"""
dim_x = np.arange(outs.shape[0])
plot = np.column_stack((outs, pops))
plt.xlabel('Time (fs)')
plt.ylabel('Energy (eV)')
plt.plot(dim_x, plot[:, 0:])
fileName = "Average_Energy.png"
    plt.savefig(fileName, format='png', dpi=300)
    plt.show()
def read_energies(path, fn, nstates, nconds):
inpfile = os.path.join(path, fn)
cols = tuple(range(5, nstates * 2 + 5, 2))
xs = np.stack(np.loadtxt(f'{inpfile}{j}', usecols=cols)
for j in range(nconds)).transpose()
# Rows = timeframes ; Columns = states ; tensor = initial conditions
xs = xs.swapaxes(0, 1)
return xs
def read_pops(path, fn, nstates, nconds):
inpfile = os.path.join(path, fn)
cols = tuple(range(3, nstates * 2 + 3, 2))
xs = np.stack(np.loadtxt(f'{inpfile}{j}', usecols=cols)
for j in range(nconds)).transpose()
# Rows = timeframes ; Columns = states ; tensor = initial conditions
xs = xs.swapaxes(0, 1)
return xs
def main(path_output, nstates, nconds):
outs = read_pops(path_output, 'out', nstates, nconds)
pops = read_pops(path_output, 'me_pop', nstates, nconds)
energies = read_energies(path_output, 'me_energies', nstates, nconds)
    # Weighted state energy for a given SH or SE population at time t
eav_outs = energies * outs
eav_pops = energies * pops
    # Ensemble average over initial conditions of the electronic energy
# as a function of time
el_ene_outs = np.average(np.sum(eav_outs, axis=1), axis=1)
el_ene_pops = np.average(np.sum(eav_pops, axis=1), axis=1)
    # Ensemble average scaled to the lowest excitation energy.
    # This way the cooling converges to 0.
lowest_hl_gap = np.average(np.amin(energies[:, 1:, :], axis=1), axis=1)
ene_outs_ref0 = el_ene_outs - lowest_hl_gap
ene_pops_ref0 = el_ene_pops - lowest_hl_gap
plot_stuff(ene_outs_ref0, ene_pops_ref0)
def read_cmd_line(parser):
"""
Parse Command line options.
"""
args = parser.parse_args()
attributes = ['p', 'nstates', 'nconds']
return [getattr(args, p) for p in attributes]
# ============<>===============
if __name__ == "__main__":
msg = "plot_states_pops -p <path/to/output>\
-nstates <number of states computed>\
-nconds <number of initial conditions>"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-p', required=True,
help='path to the Hamiltonian files in Pyxaid format')
parser.add_argument('-nstates', type=int, required=True,
help='Number of states')
parser.add_argument('-nconds', type=int, required=True,
help='Number of initial conditions')
main(*read_cmd_line(parser))
|
SCM-NV/qmworks-namd
|
scripts/pyxaid/plot_average_energy.py
|
Python
|
mit
| 3,318 | 0 |
"""Detail firewall."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import firewall
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Detail firewall."""
mgr = SoftLayer.FirewallManager(env.client)
firewall_type, firewall_id = firewall.parse_id(identifier)
if firewall_type == 'vlan':
rules = mgr.get_dedicated_fwl_rules(firewall_id)
else:
rules = mgr.get_standard_fwl_rules(firewall_id)
env.fout(get_rules_table(rules))
def get_rules_table(rules):
"""Helper to format the rules into a table.
:param list rules: A list containing the rules of the firewall
:returns: a formatted table of the firewall rules
"""
table = formatting.Table(['#', 'action', 'protocol', 'src_ip', 'src_mask',
'dest', 'dest_mask'])
table.sortby = '#'
for rule in rules:
table.add_row([
rule['orderValue'],
rule['action'],
rule['protocol'],
rule['sourceIpAddress'],
utils.lookup(rule, 'sourceIpSubnetMask'),
'%s:%s-%s' % (rule['destinationIpAddress'],
rule['destinationPortRangeStart'],
rule['destinationPortRangeEnd']),
utils.lookup(rule, 'destinationIpSubnetMask')])
return table
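# --- Illustrative sketch (added; not part of the original module) ---
# get_rules_table() only needs a list of rule dicts with the keys it reads
# above, so it can be exercised directly; the sample rule below is made up.
#
# sample_rules = [{'orderValue': 1, 'action': 'permit', 'protocol': 'tcp',
#                  'sourceIpAddress': '0.0.0.0', 'sourceIpSubnetMask': '0.0.0.0',
#                  'destinationIpAddress': '10.0.0.5',
#                  'destinationPortRangeStart': 80,
#                  'destinationPortRangeEnd': 80,
#                  'destinationIpSubnetMask': '255.255.255.255'}]
# print(get_rules_table(sample_rules))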
|
nanjj/softlayer-python
|
SoftLayer/CLI/firewall/detail.py
|
Python
|
mit
| 1,514 | 0 |
#!/usr/bin/python
# Python modules imports
from optparse import OptionParser, make_option
import pyupm_grove as g
import os, sys, socket, uuid, dbus, dbus.service
import dbus.mainloop.glib
#import gardening_system
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Set up constants
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
PROFILE_INTERFACE = 'org.bluez.Profile1'
# Trusted device function
def set_trusted(path):
props = dbus.Interface(bus.get_object("org.bluez", path), "org.freedesktop.DBus.Properties")
props.Set("org.bluez.Device1", "Trusted", True)
# Agent class
class Agent(dbus.service.Object):
@dbus.service.method(AGENT_INTERFACE, in_signature="ou", out_signature="")
def RequestConfirmation(self, device, passkey):
print("\nEnsure this passkey matches with the one in your device: %06d\nPress [ENTER] to continue" % passkey)
set_trusted(device)
return
#Profile class
class Profile(dbus.service.Object):
fd = -1
@dbus.service.method(PROFILE_INTERFACE, in_signature="oha{sv}", out_signature="")
def NewConnection(self, path, fd, properties):
self.fd = fd.take()
device_path = os.path.basename(path)
print("\nConnected to %s\nPress [ENTER] to continue" % device_path)
server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.settimeout(1)
server_sock.send("Hello, this is Edison!")
try:
while True:
try:
data = server_sock.recv(1024)
gardening_system.function(data)
if data == 'b':
server_sock.send(gardening_system.requestData())
except socket.timeout:
pass
gardening_system.myProgram()
except IOError:
pass
server_sock.close()
print("\nYour device is now disconnected\nPress [ENTER] to continue")
def bluetoothConnection():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = bus.get_object(BUS_NAME, "/org/bluez");
profile_manager = dbus.Interface(obj, "org.bluez.ProfileManager1")
profile_path = "/foo/bar/profile"
auto_connect = {"AutoConnect": False}
profile_uuid = "1101"
profile = Profile(bus, profile_path)
profile_manager.RegisterProfile(profile_path, profile_uuid, auto_connect)
mainloop = GObject.MainLoop()
mainloop.run()
if __name__ == '__main__':
# Generic dbus config
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = bus.get_object(BUS_NAME, "/org/bluez");
# Agent config
agent_capability = "KeyboardDisplay"
agent_path = "/test/agent"
agent = Agent(bus, agent_path)
agent_manager = dbus.Interface(obj, "org.bluez.AgentManager1")
agent_manager.RegisterAgent(agent_path, agent_capability)
agent_manager.RequestDefaultAgent(agent_path)
# Mainloop
mainloop = GObject.MainLoop()
mainloop.run()
|
TheIoTLearningInitiative/CodeLabs
|
Sandbox/Edison_Bluetooth/projects/gardening-system/spp.py
|
Python
|
apache-2.0
| 3,138 | 0.015296 |
#!../venv/bin/python
import sys
print (sys.argv[1:])
|
thinkl33t/mqtt2telegram
|
scripts/test.py
|
Python
|
lgpl-3.0
| 54 | 0.018519 |
import json
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from nose.tools import eq_
from kitsune import search as constants
from kitsune.access.tests import permission
from kitsune.forums.tests import forum, post, restricted_forum, thread
from kitsune.products.tests import product, topic
from kitsune.questions.tests import question, answer, answervote, questionvote
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.tests import LocalizingClient
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import group, user
from kitsune.wiki.tests import document, revision, helpful_vote
class AdvancedSearchTests(ElasticTestCase):
client_class = LocalizingClient
def test_json_format(self):
"""JSON without callback should return application/json"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_callback_validation(self):
"""Various json callbacks -- validation"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
'callback': 'callback',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/x-javascript')
def test_json_empty_query_a_1(self):
"""Empty query returns JSON format"""
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 1,
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_empty_query_a_2(self):
"""Empty query asking for form returns 400"""
# Test with flags for advanced search or not
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 2,
}, follow=True)
eq_(response.status_code, 400)
eq_(response['Content-Type'], 'application/json')
def test_search_products(self):
p = product(title=u'Product One', slug='product', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.save()
self.refresh()
response = self.client.get(
reverse('search.advanced'),
{'a': '1', 'product': 'product', 'q': 'cookies', 'w': '1'})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One' in response.content
def test_search_multiple_products(self):
p = product(title=u'Product One', slug='product-one', save=True)
p2 = product(title=u'Product Two', slug='product-two', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.products.add(p2)
doc1.save()
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'a': '1',
'product': ['product-one', 'product-two'],
'q': 'cookies',
'w': '1',
})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One, Product Two' in response.content
def test_wiki_no_query(self):
"""Tests advanced search with no query"""
doc = document(locale=u'en-US', category=10, save=True)
doc.tags.add(u'desktop')
revision(document=doc, is_approved=True, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '1', 'a': '1',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_questions_sortby(self):
"""Tests advanced search for questions with a sortby"""
question(title=u'tags tags tags', save=True)
self.refresh()
# Advanced search for questions with sortby set to 3 which is
# '-replies' which is different between Sphinx and ES.
response = self.client.get(reverse('search.advanced'), {
'q': 'tags', 'tags': 'desktop', 'w': '2', 'a': '1', 'sortby': '3',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_sortby_documents_helpful(self):
"""Tests advanced search with a sortby_documents by helpful"""
r1 = revision(is_approved=True, save=True)
r2 = revision(is_approved=True, save=True)
helpful_vote(revision=r2, helpful=True, save=True)
# Note: We have to wipe and rebuild the index because new
# helpful_votes don't update the index data.
self.setup_indexes()
self.reindex_and_refresh()
# r2.document should come first with 1 vote.
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r2.document.title, content['results'][0]['title'])
# Vote twice on r1, now it should come first.
helpful_vote(revision=r1, helpful=True, save=True)
helpful_vote(revision=r1, helpful=True, save=True)
self.setup_indexes()
self.reindex_and_refresh()
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r1.document.title, content['results'][0]['title'])
def test_questions_num_votes(self):
"""Tests advanced search for questions num_votes filter"""
q = question(title=u'tags tags tags', save=True)
# Add two question votes
questionvote(question=q, save=True)
questionvote(question=q, save=True)
self.refresh()
# Advanced search for questions with num_votes > 5. The above
        # question should not be in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 2, 'num_votes': 5,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
# Advanced search for questions with num_votes < 1. The above
        # question should not be in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 1, 'num_votes': 1,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
def test_num_votes_none(self):
"""Tests num_voted filtering where num_votes is ''"""
q = question(save=True)
questionvote(question=q, save=True)
self.refresh()
qs = {'q': '', 'w': 2, 'a': 1, 'num_voted': 2, 'num_votes': ''}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_forums_search(self):
"""This tests whether forum posts show up in searches"""
thread1 = thread(title=u'crash', save=True)
post(thread=thread1, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '0', 'created_date': '',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_forums_search_authorized_forums(self):
"""Only authorized people can search certain forums"""
# Create two threads: one in a restricted forum and one not.
forum1 = forum(name=u'ou812forum', save=True)
thread1 = thread(forum=forum1, save=True)
post(thread=thread1, content=u'audio', save=True)
forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
thread2 = thread(forum=forum2, save=True)
post(thread=thread2, content=u'audio restricted', save=True)
self.refresh()
# Do a search as an anonymous user but don't specify the
# forums to filter on. Should only see one of the posts.
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# Do a search as an authorized user but don't specify the
# forums to filter on. Should see both posts.
u = user(save=True)
g = group(save=True)
g.user_set.add(u)
ct = ContentType.objects.get_for_model(forum2)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=forum2.id, group=g, save=True)
self.client.login(username=u.username, password='testpass')
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
# Sees both results
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 2)
def test_forums_search_authorized_forums_specifying_forums(self):
"""Only authorized people can search certain forums they specified"""
# Create two threads: one in a restricted forum and one not.
forum1 = forum(name=u'ou812forum', save=True)
thread1 = thread(forum=forum1, save=True)
post(thread=thread1, content=u'audio', save=True)
forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
thread2 = thread(forum=forum2, save=True)
post(thread=thread2, content=u'audio restricted', save=True)
self.refresh()
# Do a search as an anonymous user and specify both
# forums. Should only see the post from the unrestricted
# forum.
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'forum': [forum1.id, forum2.id],
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# Do a search as an authorized user and specify both
# forums. Should see both posts.
u = user(save=True)
g = group(save=True)
g.user_set.add(u)
ct = ContentType.objects.get_for_model(forum2)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=forum2.id, group=g, save=True)
self.client.login(username=u.username, password='testpass')
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'forum': [forum1.id, forum2.id],
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
# Sees both results
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 2)
def test_forums_thread_created(self):
"""Tests created/created_date filtering for forums"""
post_created_ds = datetime(2010, 1, 1, 12, 00)
thread1 = thread(title=u'crash', created=post_created_ds, save=True)
post(thread=thread1,
created=(post_created_ds + timedelta(hours=1)),
save=True)
self.refresh()
# The thread/post should not show up in results for items
# created AFTER 1/12/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '2', 'created_date': '01/12/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
# The thread/post should show up in results for items created
# AFTER 1/1/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '2', 'created_date': '01/01/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# The thread/post should show up in results for items created
# BEFORE 1/12/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '1', 'created_date': '01/12/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# The thread/post should NOT show up in results for items
# created BEFORE 12/31/2009.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '1', 'created_date': '12/31/2009',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
def test_multi_word_tag_search(self):
"""Tests searching for tags with spaces in them"""
ques = question(title=u'audio', save=True)
ques.tags.add(u'Windows 7')
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'q': 'audio', 'q_tags': 'Windows 7', 'w': '2', 'a': '1',
'sortby': '0', 'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_category_invalid(self):
"""Tests passing an invalid category"""
# wiki and questions
ques = question(title=u'q1 audio', save=True)
ques.tags.add(u'desktop')
ans = answer(question=ques, save=True)
answervote(answer=ans, helpful=True, save=True)
d1 = document(title=u'd1 audio', locale=u'en-US', category=10,
is_archived=False, save=True)
d1.tags.add(u'desktop')
revision(document=d1, is_approved=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}
response = self.client.get(reverse('search.advanced'), qs)
eq_(2, json.loads(response.content)['total'])
def test_created(self):
"""Basic functionality of created filter."""
created_ds = datetime(2010, 6, 19, 12, 00)
# on 6/19/2010
q1 = question(title=u'q1 audio', created=created_ds, save=True)
q1.tags.add(u'desktop')
ans = answer(question=q1, save=True)
answervote(answer=ans, helpful=True, save=True)
# on 6/21/2010
q2 = question(title=u'q2 audio',
created=(created_ds + timedelta(days=2)),
save=True)
q2.tags.add(u'desktop')
ans = answer(question=q2, save=True)
answervote(answer=ans, helpful=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 2, 'format': 'json',
'sortby': 2, 'created_date': '06/20/2010'}
qs['created'] = constants.INTERVAL_BEFORE
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q1.get_absolute_url()], [r['url'] for r in results])
qs['created'] = constants.INTERVAL_AFTER
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q2.get_absolute_url()], [r['url'] for r in results])
def test_sortby_invalid(self):
"""Invalid created_date is ignored."""
qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_created_date_invalid(self):
"""Invalid created_date is ignored."""
thread1 = thread(save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json',
'created': constants.INTERVAL_AFTER,
'created_date': 'invalid'}
response = self.client.get(reverse('search.advanced'), qs)
eq_(1, json.loads(response.content)['total'])
def test_created_date_nonexistent(self):
"""created is set while created_date is left out of the query."""
qs = {'a': 1, 'w': 2, 'format': 'json', 'created': 1}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_updated_invalid(self):
"""Invalid updated_date is ignored."""
thread1 = thread(save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json',
'updated': 1, 'updated_date': 'invalid'}
response = self.client.get(reverse('search.advanced'), qs)
eq_(1, json.loads(response.content)['total'])
def test_updated_nonexistent(self):
"""updated is set while updated_date is left out of the query."""
thread1 = thread(save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}
response = self.client.get(reverse('search.advanced'), qs)
eq_(response.status_code, 200)
def test_asked_by(self):
"""Check several author values, including test for (anon)"""
author_vals = (
('DoesNotExist', 0),
('jsocol', 2),
('pcraciunoiu', 2),
)
        # Set up all the question data---create users, create the
# questions, shove it all in the index, then query it and see
# what happens.
for name, number in author_vals:
u = user(username=name, save=True)
for i in range(number):
ques = question(title=u'audio', creator=u, save=True)
ques.tags.add(u'desktop')
ans = answer(question=ques, save=True)
answervote(answer=ans, helpful=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 2, 'format': 'json'}
for author, total in author_vals:
qs.update({'asked_by': author})
response = self.client.get(reverse('search.advanced'), qs)
eq_(total, json.loads(response.content)['total'])
def test_question_topics(self):
"""Search questions for topics."""
p = product(save=True)
t1 = topic(slug='doesnotexist', product=p, save=True)
t2 = topic(slug='cookies', product=p, save=True)
t3 = topic(slug='sync', product=p, save=True)
question(topic=t2, save=True)
question(topic=t2, save=True)
question(topic=t3, save=True)
self.refresh()
topic_vals = (
(t1.slug, 0),
(t2.slug, 2),
(t3.slug, 1),
)
qs = {'a': 1, 'w': 2, 'format': 'json'}
for topics, number in topic_vals:
qs.update({'topics': topics})
response = self.client.get(reverse('search.advanced'), qs)
eq_(number, json.loads(response.content)['total'])
def test_wiki_topics(self):
"""Search wiki for topics, includes multiple."""
t1 = topic(slug='doesnotexist', save=True)
t2 = topic(slug='extant', save=True)
t3 = topic(slug='tagged', save=True)
doc = document(locale=u'en-US', category=10, save=True)
doc.topics.add(t2)
revision(document=doc, is_approved=True, save=True)
doc = document(locale=u'en-US', category=10, save=True)
doc.topics.add(t2)
doc.topics.add(t3)
revision(document=doc, is_approved=True, save=True)
self.refresh()
topic_vals = (
(t1.slug, 0),
(t2.slug, 2),
(t3.slug, 1),
([t2.slug, t3.slug], 1),
)
qs = {'a': 1, 'w': 1, 'format': 'json'}
for topics, number in topic_vals:
qs.update({'topics': topics})
response = self.client.get(reverse('search.advanced'), qs)
eq_(number, json.loads(response.content)['total'])
def test_wiki_topics_inherit(self):
"""Translations inherit topics from their parents."""
doc = document(locale=u'en-US', category=10, save=True)
doc.topics.add(topic(slug='extant', save=True))
revision(document=doc, is_approved=True, save=True)
translated = document(locale=u'es', parent=doc, category=10,
save=True)
revision(document=translated, is_approved=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 1, 'format': 'json', 'topics': 'extant'}
response = self.client.get(reverse('search.advanced', locale='es'), qs)
eq_(1, json.loads(response.content)['total'])
def test_question_products(self):
"""Search questions for products."""
p1 = product(slug='b2g', save=True)
p2 = product(slug='mobile', save=True)
p3 = product(slug='desktop', save=True)
question(product=p2, save=True)
question(product=p2, save=True)
question(product=p3, save=True)
self.refresh()
product_vals = (
(p1.slug, 0),
(p2.slug, 2),
(p3.slug, 1),
)
qs = {'a': 1, 'w': 2, 'format': 'json'}
for products, number in product_vals:
qs.update({'product': products})
response = self.client.get(reverse('search.advanced'), qs)
eq_(number, json.loads(response.content)['total'])
def test_wiki_products(self):
"""Search wiki for products."""
prod_vals = (
(product(slug='b2g', save=True), 0),
(product(slug='mobile', save=True), 1),
(product(slug='desktop', save=True), 2),
)
for prod, total in prod_vals:
for i in range(total):
doc = document(locale=u'en-US', category=10, save=True)
doc.products.add(prod)
revision(document=doc, is_approved=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 1, 'format': 'json'}
for prod, total in prod_vals:
qs.update({'product': prod.slug})
response = self.client.get(reverse('search.advanced'), qs)
eq_(total, json.loads(response.content)['total'])
def test_wiki_products_inherit(self):
"""Translations inherit products from their parents."""
doc = document(locale=u'en-US', category=10, save=True)
p = product(title=u'Firefox', slug=u'desktop', save=True)
doc.products.add(p)
revision(document=doc, is_approved=True, save=True)
translated = document(locale=u'fr', parent=doc, category=10,
save=True)
revision(document=translated, is_approved=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 1, 'format': 'json', 'product': p.slug}
response = self.client.get(reverse('search.advanced', locale='fr'), qs)
eq_(1, json.loads(response.content)['total'])
def test_discussion_filter_author(self):
"""Filter by author in discussion forums."""
author_vals = (
('DoesNotExist', 0),
('admin', 1),
('jsocol', 4),
)
for name, number in author_vals:
u = user(username=name, save=True)
for i in range(number):
thread1 = thread(title=u'audio', save=True)
post(thread=thread1, author=u, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json'}
for author, total in author_vals:
qs.update({'author': author})
response = self.client.get(reverse('search.advanced'), qs)
eq_(total, json.loads(response.content)['total'])
def test_discussion_filter_sticky(self):
"""Filter for sticky threads."""
thread1 = thread(title=u'audio', is_locked=True, is_sticky=True,
save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': 1, 'forum': 1}
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_(len(results), 1)
def test_discussion_filter_locked(self):
"""Filter for locked threads."""
thread1 = thread(title=u'audio', is_locked=True,
save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': 2}
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_(len(results), 1)
def test_discussion_filter_sticky_locked(self):
"""Filter for locked and sticky threads."""
thread1 = thread(title=u'audio', is_locked=True, is_sticky=True,
save=True)
post(thread=thread1, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': (1, 2)}
response = self.client.get(reverse('search.advanced'), qs)
result = json.loads(response.content)['results'][0]
eq_(thread1.get_absolute_url(), result['url'])
def test_forums_filter_updated(self):
"""Filter for updated date."""
post_updated_ds = datetime(2010, 5, 3, 12, 00)
thread1 = thread(title=u't1 audio', save=True)
post(thread=thread1, created=post_updated_ds, save=True)
thread2 = thread(title=u't2 audio', save=True)
post(thread=thread2,
created=(post_updated_ds + timedelta(days=2)),
save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json',
'sortby': 1, 'updated_date': '05/04/2010'}
qs['updated'] = constants.INTERVAL_BEFORE
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([thread1.get_absolute_url()], [r['url'] for r in results])
qs['updated'] = constants.INTERVAL_AFTER
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([thread2.get_absolute_url()], [r['url'] for r in results])
def test_archived(self):
"""Ensure archived articles show only when requested."""
doc = document(title=u'impalas', locale=u'en-US',
is_archived=True, save=True)
revision(document=doc, summary=u'impalas',
is_approved=True, save=True)
self.refresh()
# include_archived gets the above document
qs = {'q': 'impalas', 'a': 1, 'w': 1, 'format': 'json',
'include_archived': 'on'}
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_(1, len(results))
# no include_archived gets you nothing since the only
# document in the index is archived
qs = {'q': 'impalas', 'a': 0, 'w': 1, 'format': 'json'}
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_(0, len(results))
def test_discussion_filter_forum(self):
"""Filter by forum in discussion forums."""
forum1 = forum(name=u'Forum 1', save=True)
thread1 = thread(forum=forum1, title=u'audio 1', save=True)
post(thread=thread1, save=True)
forum2 = forum(name=u'Forum 2', save=True)
thread2 = thread(forum=forum2, title=u'audio 2', save=True)
post(thread=thread2, save=True)
self.refresh()
qs = {'a': 1, 'w': 4, 'format': 'json'}
for forum_id in (forum1.id, forum2.id):
qs['forum'] = int(forum_id)
response = self.client.get(reverse('search.advanced'), qs)
eq_(json.loads(response.content)['total'], 1)
def test_discussion_forum_with_restricted_forums(self):
"""Tests who can see restricted forums in search form."""
# This is a long test, but it saves us from doing the setup
# twice.
forum1 = forum(name=u'ou812forum', save=True)
thread1 = thread(forum=forum1, title=u'audio 2', save=True)
post(thread=thread1, save=True)
forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
thread2 = thread(forum=forum2, title=u'audio 2', save=True)
post(thread=thread2, save=True)
self.refresh()
# Get the Advanced Search Form as an anonymous user
response = self.client.get(reverse('search.advanced'), {'a': '2'})
eq_(200, response.status_code)
# Regular forum should show up
assert 'ou812forum' in response.content
# Restricted forum should not show up
assert 'restrictedkeepout' not in response.content
u = user(save=True)
g = group(save=True)
g.user_set.add(u)
ct = ContentType.objects.get_for_model(forum2)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=forum2.id, group=g, save=True)
# Get the Advanced Search Form as a logged in user
self.client.login(username=u.username, password='testpass')
response = self.client.get(reverse('search.advanced'), {'a': '2'})
eq_(200, response.status_code)
# Both forums should show up for authorized user
assert 'ou812forum' in response.content
assert 'restrictedkeepout' in response.content
|
Osmose/kitsune
|
kitsune/search/tests/test_search_advanced.py
|
Python
|
bsd-3-clause
| 31,926 | 0 |
# ********************************************************************** <====
from artmgr.transport.basew import BaseWTransport
# ********************************************************************** ====>
import os
import sys
import errno
import stat
import re
# chunksize for reading/writing local files
CHUNK = 8192
# ---------------------------------------------------------------------
def mkpath_recursive(path):
"""Test a local path and, if it does not exist, create it recursively"""
try:
mode = os.stat( path ).st_mode
if not stat.S_ISDIR(mode):
raise InvalidArgumentError("parent path '"+str(path)+"' not a dir")
except OSError as e:
if e.errno != errno.ENOENT:
raise
(head,tail) = os.path.split( path )
if head:
mkpath_recursive( head )
os.mkdir( path )
# ---------------------------------------------------------------------
class LocalTransport( BaseWTransport ):
"""
A full R/W transport instance that uses a locally visible directory to
store and read all artifact data
"""
def __init__( self, basedir, subrepo ):
"""
Constructor
@param basedir (str): local folder to use
@param subrepo (str): name of the repository we are dealing with
"""
if not basedir:
raise InvalidArgumentError("Empty basedir in local transport")
if not subrepo:
raise InvalidArgumentError("Empty subrepo in local transport")
self._basedir = os.path.join(basedir,subrepo)
super(LocalTransport,self).__init__()
def init_base( self ):
"""Ensure the base path for the repository exists"""
mkpath_recursive( self._basedir )
def get( self, sourcename, dest ):
"""
Read a file into a file-like destination.
@param sourcename (str): name of the file in remote repo
@param dest (file): an object with a write() method
@return (bool): \c True if ok, \c False if the file does not exist
"""
name = os.path.join(self._basedir,sourcename)
try:
with open(name, 'rb') as f:
while True:
bytes = f.read( CHUNK )
if not bytes:
break
dest.write( bytes )
return True
except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
def otype( self, path ):
"""
        Given the path of an object, return:
* 'F' for a file,
* 'D' for a directory,
* \c None if the path does not exist
"""
oldname = os.path.join(self._basedir,path)
try:
mode = os.stat( oldname ).st_mode
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
return 'D' if stat.S_ISDIR(mode) else 'F' if stat.S_ISREG(mode) else '?'
def put( self, source, destname ):
"""
        Store a file. If a file with the same name exists, it is overwritten.
@param source (file): an object with a read() method
@param destname (str): name of the destination file,
relative to repo base directory
"""
name = os.path.join(self._basedir,destname)
with open(name, 'wb') as f:
while True:
bytes = source.read( CHUNK )
if not bytes:
break
f.write( bytes )
def delete( self, filename ):
"""
Delete a file
"""
name = os.path.join(self._basedir,filename)
os.unlink( name )
def rename( self, oldname, newname ):
"""
Rename a file into a new name and/or folder
"""
oldname = os.path.join(self._basedir,oldname)
newname = os.path.join(self._basedir,newname)
os.rename( oldname, newname )
def folder_create( self, path ):
"""
Make a folder in the repository, assuming all parent folders exist
"""
os.mkdir( os.path.join(self._basedir,path) )
def folder_list( self, path ):
"""
Return the list of all components (files & folders) in a folder
*This method is optional*
"""
return os.listdir( os.path.join(self._basedir,path) )
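# --- Illustrative usage sketch (added; not part of the original module) ---
# Round-trips a small payload through the transport; io.BytesIO stands in for
# any object exposing read()/write(), and the paths are placeholders.
#
# import io
# t = LocalTransport('/tmp/artifacts', 'myrepo')
# t.init_base()
# t.put(io.BytesIO(b'payload'), 'a.bin')
# buf = io.BytesIO()
# ok = t.get('a.bin', buf)   # ok is True, buf now holds b'payload'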
|
paulovn/artifact-manager
|
lib/artmgr/transport/local.py
|
Python
|
gpl-2.0
| 4,422 | 0.015604 |
#author :haiyfu
#date:April 14
#description:
#contact:haiyangfu512@gmail.com
"""
This little part checks how many different values are in
a column and stores the unique values in a list.
For FCBF initially.
The last column is the class.
"""
from sys import argv
#only count the target file and return
#a list structure which contains the detailed
#information, like [23, [[1,23],[11,233]], 34 ]
#Here are the corresponding meanings:
#[attribute_number,[ [first-column-different-values] [2nd-df-val] ],line_num]
def rc_gn(sn):
fin=open(sn)
atrn=len(fin.readline().split(","))
#Initialize the result list
fin.seek(0,0)
rc=[]
rc.append(atrn)
rc.append([])
l=fin.readline().strip("\r \n ").split(",")
for x in l:
rc[1].append([x])
count=0
for l in fin:
l=l.strip("\n \r").split(",")
idx=0
if(len(l)<rc[0]):
break
for x in l:
if x not in rc[1][idx]:
rc[1][idx].append(x)
rc[1][idx].sort()
idx=idx+1
count=count+1
#print rc
rc.append(count+1)
fin.close()
return rc
def wrt_rc(rc,tn):
#print rc
ft=open(tn,"w")
#class info
ft.write(str(len(rc[1][-1]))+","+",".join(rc[1][-1])+".\n" )
    #attribute number
ft.write(str( rc[0]-1 )+"\n")
    #every attribute's info
for x in range(rc[0]-1):
sl="A"+str(x+1)+" - "+",".join(rc[1][x])+".\n"
ft.write(sl)
ft.close()
if __name__=="__main__":
script_nm,src_file,out_file=argv
wrt_rc(rc_gn(src_file),out_file)
|
haiy/XF_PRISM
|
src/XF-Prism/rc_generator.py
|
Python
|
gpl-3.0
| 1,587 | 0.028986 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Simple scrambling test generator."""
import copy
import random
from typing import List, Text, Optional
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.lib import utils
JsonDict = types.JsonDict
FIELDS_TO_SCRAMBLE_KEY = 'Fields to scramble'
class Scrambler(lit_components.Generator):
"""Scramble all words in an example to generate a new example."""
@staticmethod
def scramble(val: Text) -> Text:
words = val.split(' ')
random.shuffle(words)
return ' '.join(words)
def config_spec(self) -> types.Spec:
return {
FIELDS_TO_SCRAMBLE_KEY:
types.MultiFieldMatcher(
spec='input',
types=['TextSegment'],
select_all=True),
}
def generate(self,
example: JsonDict,
model: lit_model.Model,
dataset: lit_dataset.Dataset,
config: Optional[JsonDict] = None) -> List[JsonDict]:
"""Naively scramble all words in an example.
Note: Even if more than one field is to be scrambled, only a single example
will be produced, unlike other generators which will produce multiple
examples, one per field.
Args:
example: the example used for basis of generated examples.
model: the model.
dataset: the dataset.
config: user-provided config properties.
Returns:
examples: a list of generated examples.
"""
del model # Unused.
config = config or {}
# If config key is missing, generate no examples.
fields_to_scramble = list(config.get(FIELDS_TO_SCRAMBLE_KEY, []))
if not fields_to_scramble:
return []
# TODO(lit-dev): move this to generate_all(), so we read the spec once
# instead of on every example.
text_keys = utils.find_spec_keys(dataset.spec(), types.TextSegment)
if not text_keys:
return []
text_keys = [key for key in text_keys if key in fields_to_scramble]
new_example = copy.deepcopy(example)
for text_key in text_keys:
new_example[text_key] = self.scramble(example[text_key])
return [new_example]
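# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Scrambler.scramble is a plain static method, so it can be exercised without a
# model or dataset; shuffling keeps the same words in a different order.
if __name__ == '__main__':
  random.seed(0)  # make the shuffle reproducible
  scrambled = Scrambler.scramble('the quick brown fox jumps')
  assert sorted(scrambled.split(' ')) == sorted('the quick brown fox jumps'.split(' '))
  print(scrambled)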
|
PAIR-code/lit
|
lit_nlp/components/scrambler.py
|
Python
|
apache-2.0
| 2,899 | 0.00276 |
import re
import html
# The regular string.split() only takes a max number of splits,
# but it won't unpack if there aren't enough values.
# This function ensures that we always get the wanted
# number of returned values, even if the string doesn't include
# as many splits values as we want, simply by filling in extra
# empty strings at the end.
#
# Some examples:
# split("a b c d", " ", 3) = ["a", "b", "c d"]
# split("a b c" , " ", 3) = ["a", "b", "c"]
# split("a b", " ", 3) = ["a", "b", ""]
def split(s, sep, count):
return (s + ((count - 1 - s.count(sep)) * sep)).split(sep, count - 1)
# Sanitize a string by removing all new lines and extra spaces
def sanitize_string(s):
return " ".join(s.split()).strip()
# Unescape HTML/XML entities
def unescape_entities(text):
def replace_entity(match):
try:
if match.group(1) in html.entities.name2codepoint:
return chr(html.entities.name2codepoint[match.group(1)])
elif match.group(1).lower().startswith("#x"):
return chr(int(match.group(1)[2:], 16))
elif match.group(1).startswith("#"):
return chr(int(match.group(1)[1:]))
except (ValueError, KeyError):
pass # Fall through to default return
return match.group(0)
return re.sub(r"&([#a-zA-Z0-9]+);", replace_entity, text)
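# --- Illustrative usage (editor's sketch, not part of the original module) ---
# split() pads with empty strings so unpacking always succeeds, and
# unescape_entities() resolves named, decimal and hexadecimal entities.
if __name__ == "__main__":
    assert split("a b c d", " ", 3) == ["a", "b", "c d"]
    assert split("a b", " ", 3) == ["a", "b", ""]
    key, value, rest = split("key=value", "=", 3)  # no ValueError despite a single "="
    assert (key, value, rest) == ("key", "value", "")
    assert unescape_entities("&lt;b&gt; &amp; &#65; &#x41;") == "<b> & A A"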
|
Tigge/platinumshrimp
|
utils/str_utils.py
|
Python
|
mit
| 1,370 | 0.00073 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import re
from setuptools import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
packages = [
"the_big_username_blacklist"
]
# Handle requirements
install_requires = []
tests_requires = [
"pytest==3.0.5",
]
# Convert markdown to rst
try:
from pypandoc import convert
long_description = convert("README.md", "rst")
except Exception:  # pypandoc not installed or conversion failed
long_description = ""
version = ''
with io.open('the_big_username_blacklist/__init__.py', 'r', encoding='utf-8') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name="the_big_username_blacklist",
version=version,
description="Validate usernames against a blacklist", # NOQA
long_description=long_description,
author="Martin Sandström",
author_email="martin@marteinn.se",
url="https://github.com/marteinn/the-big-username-blacklist-python",
packages=packages,
package_data={"": ["LICENSE", ], "the_big_username_blacklist": ["*.txt"]},
package_dir={"the_big_username_blacklist": "the_big_username_blacklist"},
include_package_data=True,
install_requires=install_requires,
license="MIT",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy"
],
)
|
marteinn/The-Big-Username-Blacklist-Python
|
setup.py
|
Python
|
mit
| 1,848 | 0.001083 |
#SBaaS
from .stage02_physiology_graphData_io import stage02_physiology_graphData_io
from SBaaS_models.models_COBRA_execute import models_COBRA_execute
from .stage02_physiology_analysis_query import stage02_physiology_analysis_query
#System
import copy
class stage02_physiology_graphData_execute(stage02_physiology_graphData_io):
def execute_findShortestPaths(self,
analysis_id_I,
algorithms_params_I,
nodes_startAndStop_I,
exclusion_list_I=[],
weights_I=[],
):
'''
compute the shortest paths
INPUT:
        analysis_id_I
        algorithms_params_I
        nodes_startAndStop_I
        exclusion_list_I
        weights_I
        OUTPUT:
        rows added to data_stage02_physiology_graphData_shortestPathStats
        and data_stage02_physiology_graphData_shortestPaths
'''
exCOBRA01 = models_COBRA_execute(self.session,self.engine,self.settings);
exCOBRA01.initialize_supportedTables();
physiology_analysis_query = stage02_physiology_analysis_query(self.session,self.engine,self.settings);
physiology_analysis_query.initialize_supportedTables();
data_O=[];
data_graphs_O=[];
rows = physiology_analysis_query.getJoin_analysisID_dataStage02PhysiologyAnalysisAndSimulation(analysis_id_I);
for row in rows:
weights = [];
if type(weights_I)==type([]):
weights = weights_I;
weights_str = '[]';
elif type(weights_I)==type(''):
if weights_I == 'stage02_physiology_sampledData_query':
weights = self.import_graphWeights_sampledData(row['simulation_id']);
weights_str = 'stage02_physiology_sampledData_query';
elif weights_I == 'stage02_physiology_simulatedData_query':
weights = self.import_graphWeights_simulatedData(row['simulation_id']);
weights_str = 'stage02_physiology_simulatedData_query';
else:
print('weights source not recognized');
# run the analysis for different algorithms/params
for ap in algorithms_params_I:
shortestPaths = exCOBRA01.execute_findShortestPath_nodes(
row['model_id'],
nodes_startAndStop_I = nodes_startAndStop_I,
algorithm_I=ap['algorithm'],
exclusion_list_I=exclusion_list_I,
params_I=ap['params'],
weights_I=weights
)
for sp in shortestPaths:
tmp = {};
tmp['analysis_id']=analysis_id_I
tmp['simulation_id']=row['simulation_id']
tmp['weights']=weights_str;
tmp['used_']=True;
tmp['comment_']=None;
tmp['params']=sp['params']
tmp['path_start']=sp['start']
tmp['algorithm']=sp['algorithm']
tmp['path_stop']=sp['stop']
tmp1 = copy.copy(tmp);
tmp1['path_n']=sp['path_n']
tmp1['path_iq_1']=sp['path_iq_1']
tmp1['path_var']=sp['path_var']
tmp1['path_ci_lb']=sp['path_ci_lb']
tmp1['path_cv']=sp['path_cv']
tmp1['path_iq_3']=sp['path_iq_3']
tmp1['path_ci_ub']=sp['path_ci_ub']
tmp1['path_average']=sp['path_average']
tmp1['path_max']=sp['path_max']
tmp1['path_median']=sp['path_median']
tmp1['path_ci_level']=sp['path_ci_level']
tmp1['path_min']=sp['path_min']
data_O.append(tmp1);
for path in sp['all_paths']:
tmp2 = copy.copy(tmp);
tmp2['paths']=path;
data_graphs_O.append(tmp2);
#for sp in shortestPaths:
#dict_keys(['stop', 'params', 'path_n', 'all_paths', 'path_iq_1', 'path_var', 'path_ci_lb', 'path_cv', 'path_iq_3', 'path_ci_ub', 'path_average', 'path_max', 'path_median', 'start', 'algorithm', 'path_ci_level', 'path_min'])
# str = "start: %s, stop: %s, min: %s, max: %s, average: %s, " \
# %(sp['start'],sp['stop'],sp['path_min'],
# sp['path_max'],sp['path_average'])
# print(str)
self.add_rows_table('data_stage02_physiology_graphData_shortestPathStats',data_O);
self.add_rows_table('data_stage02_physiology_graphData_shortestPaths',data_graphs_O);
|
dmccloskey/SBaaS_COBRA
|
SBaaS_COBRA/stage02_physiology_graphData_execute.py
|
Python
|
mit
| 4,655 | 0.017401 |
from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type, \
make_var_dim, make_strided_dim, make_fixed_dim, make_cfixed_dim
__all__ = ['var', 'strided', 'fixed', 'cfixed']
class _Dim(object):
__slots__ = []
def __mul__(self, rhs):
if isinstance(rhs, w_type):
# Apply all the dimensions to get
# produce a type
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, (str, type)):
# Allow:
# ndt.strided * 'int32'
# ndt.strided * int
rhs = w_type(rhs)
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, _Dim):
# Combine the dimension fragments
return _DimFragment(self.dims + rhs.dims)
else:
raise TypeError('Expected a dynd dimension or type, not %r' % rhs)
def __pow__(self, count):
return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
__slots__ = ['dims']
def __init__(self, dims):
self.dims = dims
def __repr__(self):
return ' * '.join(repr(dim) for dim in self.dims)
class _Var(_Dim):
"""
Creates a var dimension when combined with other types.
Examples
--------
>>> ndt.var * ndt.int32
ndt.type('var * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_var_dim(eltype)
def __repr__(self):
return 'ndt.var'
class _Strided(_Dim):
"""
Creates a strided dimension when combined with other types.
Examples
--------
>>> ndt.strided * ndt.int32
ndt.type('strided * int32')
>>> ndt.fixed[5] * ndt.strided * ndt.float64
ndt.type('5 * strided * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_strided_dim(eltype)
def __repr__(self):
return 'ndt.strided'
class _Fixed(_Dim):
"""
Creates a fixed dimension when combined with other types.
Examples
--------
>>> ndt.fixed[3] * ndt.int32
ndt.type('3 * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.fixed[dim_size],' +
' not just ndt.fixed')
def create(self, eltype):
return make_fixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _Fixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.fixed[%d]' % self.dim_size
else:
return 'ndt.fixed'
class _CFixed(_Dim):
"""
Creates a cfixed dimension when combined with other types.
Examples
--------
>>> ndt.cfixed[3] * ndt.int32
ndt.type('cfixed[3] * int32')
>>> ndt.fixed[5] * ndt.cfixed[2] * ndt.float64
ndt.type('5 * cfixed[2] * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.cfixed[dim_size],' +
' not just ndt.cfixed')
def create(self, eltype):
return make_cfixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _CFixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.cfixed[%d]' % self.dim_size
else:
return 'ndt.cfixed'
var = _Var()
strided = _Strided()
fixed = _Fixed()
cfixed = _CFixed()
|
aterrel/dynd-python
|
dynd/ndt/dim_helpers.py
|
Python
|
bsd-2-clause
| 4,145 | 0.000965 |
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from extras.models import CustomFieldModel, CustomFieldValue
from utilities.models import CreatedUpdatedModel
from utilities.utils import csv_format
@python_2_unicode_compatible
class TenantGroup(models.Model):
"""
An arbitrary collection of Tenants.
"""
name = models.CharField(max_length=50, unique=True)
slug = models.SlugField(unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?group={}".format(reverse('tenancy:tenant_list'), self.slug)
@python_2_unicode_compatible
class Tenant(CreatedUpdatedModel, CustomFieldModel):
"""
A Tenant represents an organization served by the NetBox owner. This is typically a customer or an internal
department.
"""
name = models.CharField(max_length=30, unique=True)
slug = models.SlugField(unique=True)
group = models.ForeignKey('TenantGroup', related_name='tenants', blank=True, null=True, on_delete=models.SET_NULL)
description = models.CharField(max_length=100, blank=True, help_text="Long-form name (optional)")
comments = models.TextField(blank=True)
custom_field_values = GenericRelation(CustomFieldValue, content_type_field='obj_type', object_id_field='obj_id')
class Meta:
ordering = ['group', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:tenant', args=[self.slug])
def to_csv(self):
return csv_format([
self.name,
self.slug,
self.group.name if self.group else None,
self.description,
])
|
Alphalink/netbox
|
netbox/tenancy/models.py
|
Python
|
apache-2.0
| 1,852 | 0.00216 |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
import requests
import json
import logging
from bs4 import BeautifulSoup as htmldoc
def jsonify(obj):
    # Pretty-printing helper for the debug logging below; the module as
    # published referenced a jsonify() that was never defined or imported.
    return json.dumps(obj, indent=2, sort_keys=True)
def carrier_lookup():
    return None
class CarrierLookup(object):
def __init__(self, number, logname=None):
self.number = number
self._logname = logname if logname else ''
self.log = logging.getLogger(self._logname)
def lookup(self):
log = self.log
domain = 'www.twilio.com'
host = 'https://{0}'.format(domain)
lookup = '{0}/lookup'.format(host)
# masquerade as OS-X Firefox
s = requests.Session()
s.headers['user-agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:37.0) Gecko/20100101 Firefox/37.0'
s.headers['x-requested-with'] = 'XMLHttpRequest'
s.headers['accept-language'] = 'en-US,en;q=0.5'
s.headers['cache-control'] = 'no-cache'
s.headers['content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
s.headers['host'] = domain
s.headers['DNT'] = '1'
s.headers['connection'] = 'close'
# fetch the base page to set the cookies and get csrf and sid values
r = s.get(lookup)
hdrs = {k: v for k, v in s.headers.iteritems()}
cookies = [{c.name: c.value} for c in s.cookies]
log.debug('\nsession headers: {0}\n'.format(jsonify(hdrs)))
log.debug('\nsession cookies: {0}\n'.format(jsonify(cookies)))
if not cookies:
log.error('unknown error accessing base page: {0}'.format(lookup))
log.error('ERROR: {0}'.format(r.status_code))
log.error(r.text)
raise ValueError()
# extract the csrf and sid
page = htmldoc(r.text)
token = page.find('meta', attrs={'name': 'csrfToken'})
        if token is None:
            log.debug(r.text)
            raise ValueError('csrfToken meta tag not found in lookup page')
        csrf = token['content']
log.debug('NAME={0} CONTENT={1}'.format(token['name'], csrf))
sid_attrs = {'type': 'hidden', 'role': 'visitorSid'}
role = page.find('input', attrs=sid_attrs)
sid = role['value']
log.debug('ROLE={0} VALUE={1}'.format(role['role'], sid))
# retrieve the phone number information
s.headers['referer'] = lookup
params = {
'Type': 'lookup',
'PhoneNumber': "{0}".format(self.number),
'VisitorSid': sid,
'CSRF': csrf,
}
log.debug('\nparams: {0}\n'.format(jsonify(params)))
url = '{0}/functional-demos'.format(host)
r = s.post(url, params=params)
info = json.loads(r.content)
return info
|
neuroticnerd/armory
|
armory/phone/lookup.py
|
Python
|
apache-2.0
| 2,647 | 0.000756 |
#!/usr/bin/env python
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
def __str__(self):
return "validation failed: {}".format(self.args[0])
class BaseConfigment(ConfigObjWrap):
CONFIGSPEC = None
DEFAULT_MODE_HIDE = "hide"
DEFAULT_MODE_SHOW = "show"
DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
DEFAULT_MODE = DEFAULT_MODE_HIDE
def __init__(self, filename=None, default_mode=None):
super(BaseConfigment, self).__init__(
infile=None,
configspec=self.__class__.CONFIGSPEC,
unrepr=True,
interpolation=False,
indent_type=" ",
stringify=True,
)
if default_mode is None:
default_mode = self.DEFAULT_MODE
self.default_mode = default_mode
self.set_filename(filename)
if self.filename is not None:
self.load_file(filename, throw_on_errors=True)
else:
self.initialize(throw_on_errors=False)
def set_filename(self, filename=None):
super(BaseConfigment, self).set_filename(filename)
if self.filename is None:
self._base_dir = os.getcwd()
else:
self._base_dir = os.path.dirname(os.path.abspath(filename))
def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
if base_dir is None:
base_dir = self._base_dir
validator = ConfigmentValidator()
copy = self.default_mode == self.DEFAULT_MODE_SHOW
result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
result = self.filter_validation_result(result)
self.set_paths(base_dir, reset=reset)
if throw_on_errors and result:
raise ConfigmentValidateError(result)
c_result = ConfigObjWrap(
infile=result,
stringify=True,
unrepr=True,
indent_type=' ',
)
return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
def __init__(self, filename=None, default_mode=None):
super(Configment, self).__init__(
filename=filename,
default_mode=default_mode,
)
def impl_initialize(self, throw_on_errors=False):
try:
return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
except: # pylint: disable=bare-except
return False
def impl_load_file(self, filename, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
Pathname.set_default_base_dir(self._base_dir)
self.set_filename(filename)
self.reload()
try:
result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
finally:
Pathname.set_default_base_dir(default_base_dir)
return result
def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
try:
if filename is not None:
base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
else:
base_dir = self._base_dir
Pathname.set_default_base_dir(base_dir)
self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
self.write(stream)
finally:
Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
if class_name is None:
class_name = os.path.splitext(os.path.basename(configspec_filename))[0]
class_bases = (Configment, )
class_dict = {
'CONFIGSPEC_SOURCE': load_configspec(configspec_filename, dir_list=dir_list),
}
return MetaConfigment(class_name, class_bases, class_dict)
|
simone-campagna/py-configment
|
src/configment/configment.py
|
Python
|
apache-2.0
| 5,069 | 0.001578 |
"""
compact_group.py - Part of millennium-compact-groups package
Defines CompactGroup object to handle information about a single
compact group.
Copyright(C) 2016 by
Trey Wenger; tvwenger@gmail.com
Chris Wiens; cdw9bf@virginia.edu
Kelsey Johnson; kej7a@virginia.edu
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
14 Mar 2016 - TVW Finalized version 1.0
"""
_PACK_NAME = 'millennium-compact-groups'
_PROG_NAME = 'compact_group.py'
_VERSION = 'v1.0'
# System utilities
import sys
import os
# Numerical utilities
import numpy as np
import pandas
class CompactGroup:
"""
Compact Group Object
"""
def __init__(self, label, members):
"""
Initialize ComactGroup Object
"""
self.label = label
self.members = members
self.median_vel = 0.0
self.mediod = None
self.radius = 0.0
self.avg_mvir = 0.0
self.avg_stellarmass = 0.0
self.num_nearby_galaxies = 0
self.neighbors = []
self.annular_mass_ratio = 0.0
self.secondtwo_mass_ratio = 0.0
def find_dwarfs(self,dwarf_limit):
"""
Find galaxies that have a stellar mass less than dwarf_limit
"""
# add a is_dwarf column to members
self.members['is_dwarf'] = np.zeros(len(self.members),dtype=bool)
# assign dwarfs
ind = self.members['stellarMass'] < dwarf_limit
self.members.ix[ind,'is_dwarf'] = True
def calc_median_velocity(self):
"""
Calculate the median velocity of galaxies in this group
"""
good = (~self.members['is_dwarf'])
vels = (self.members['velX']*self.members['velX'] +
self.members['velY']*self.members['velY'] +
self.members['velZ']*self.members['velZ'])**0.5
        # add a velocity column to members
self.members['vel'] = vels
self.median_vel = np.median(vels[good])
def find_flybys(self,crit_velocity):
"""
Find galaxies that are travelling crit_velocity faster or
slower than median velocity of group. These are "fly-bys"
"""
# add a is_flyby column to members
self.members['is_flyby'] = np.zeros(len(self.members),dtype=bool)
# assign flybys
ind = np.abs(self.members['vel'] - self.median_vel) > crit_velocity
self.members.ix[ind,'is_flyby'] = True
def calc_mediod(self):
"""
Calculate the mediod center of this group, excluding
dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
x_med = np.median(self.members['x'][good])
y_med = np.median(self.members['y'][good])
z_med = np.median(self.members['z'][good])
self.mediod = np.array([x_med,y_med,z_med])
def calc_radius(self):
"""
Calculate the radius of this group, defined as the
maximum galaxy distance from the mediod, excluding
dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
xdist = self.members['x'][good]-self.mediod[0]
ydist = self.members['y'][good]-self.mediod[1]
zdist = self.members['z'][good]-self.mediod[2]
dists = (xdist*xdist + ydist*ydist + zdist*zdist)**0.5
self.radius = np.max(dists)
def calc_avg_mvir(self):
"""
Calculate the average virial mass of galaxies in this group
        excluding dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
if np.sum(good) == 0:
self.avg_mvir = np.nan
else:
self.avg_mvir = np.mean(self.members['mvir'][good])
def calc_avg_stellarmass(self):
"""
Calculate the average stellar mass of galaxies in this group
        excluding dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
if np.sum(good) == 0:
self.avg_stellarmass = np.nan
else:
self.avg_stellarmass = np.mean(self.members['stellarMass'][good])
def calc_annular_mass_ratio(self,radius):
"""
        Calculate the ratio of the virial mass density in the surrounding
        annulus (neighboring galaxies) to the virial mass density of all
        galaxies within the group sphere
"""
# mass of cluster
sphere_mass = np.sum(self.members['mvir'])
sphere_mass = sphere_mass / (4.*np.pi/3. * self.radius**3.)
# mass in annulus
annulus_mass = np.sum(self.neighbors['mvir'])
annulus_mass = annulus_mass/(4.*np.pi/3. * (radius**3. - self.radius**3.))
self.annular_mass_ratio = annulus_mass/sphere_mass
def calc_secondtwo_mass_ratio(self):
"""
Calculate the ratio of the virial masses of the second largest
members to the virial mass of the largest member
"""
sorted_masses = np.sort(self.members['mvir'])
self.secondtwo_mass_ratio = (sorted_masses[-2]+sorted_masses[-3])/sorted_masses[-1]
|
tvwenger/millennium-compact-groups
|
compact_group.py
|
Python
|
gpl-3.0
| 5,674 | 0.003877 |
from .test_base_class import ZhihuClientClassTest
PEOPLE_SLUG = 'giantchen'
class TestPeopleBadgeNumber(ZhihuClientClassTest):
def test_badge_topics_number(self):
self.assertEqual(
len(list(self.client.people(PEOPLE_SLUG).badge.topics)), 2,
)
def test_people_has_badge(self):
self.assertTrue(self.client.people(PEOPLE_SLUG).badge.has_badge)
def test_people_has_identity(self):
self.assertFalse(self.client.people(PEOPLE_SLUG).badge.has_identity)
def test_people_is_best_answerer_or_not(self):
self.assertTrue(self.client.people(PEOPLE_SLUG).badge.is_best_answerer)
def test_people_identify_information(self):
self.assertIsNone(self.client.people(PEOPLE_SLUG).badge.identity)
|
7sDream/zhihu-oauth
|
test/test_client_people_badge.py
|
Python
|
mit
| 762 | 0 |
#! /bin/python
import xbh as xbhpkg
xbh = xbhpkg.Xbh()
#xbh.switch_to_app()
xbh.calc_checksum()
print(xbh.get_results())
|
jhnphm/xbs_xbd
|
python/scripts/test.py
|
Python
|
gpl-3.0
| 122 | 0.008197 |
class FieldRegistry(object):
_registry = {}
def add_field(self, model, field):
reg = self.__class__._registry.setdefault(model, [])
reg.append(field)
def get_fields(self, model):
return self.__class__._registry.get(model, [])
def __contains__(self, model):
return model in self.__class__._registry
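# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The registry is keyed by model class and shared between instances because
# _registry lives on the class itself.
if __name__ == "__main__":
    class DummyModel(object):
        pass
    FieldRegistry().add_field(DummyModel, "created_by")
    assert DummyModel in FieldRegistry()  # shared class-level state
    assert FieldRegistry().get_fields(DummyModel) == ["created_by"]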
|
feroda/django-pro-history
|
current_user/registration.py
|
Python
|
agpl-3.0
| 349 | 0 |
"""
Generates 40 random numbers and writes them
to a file. No number is repeated.
~ Created by Elijah Wilson 2014 ~
"""
# used for generating random integers
from random import randint
# open the output file -> "in.data"
f = open("in.data", "w")
# create an empty list
succ = []
# loops through 40 times for generating numbers
for x in xrange(0,40):
# generate random int between 1111 & 9999
randNum = randint(1111, 9999)
# check to see if it was already generated
if randNum not in succ:
# put the random number in the list
succ.append(str(randNum))
else:
# while the randNum has already been generated
# generate a new one
while randNum in succ:
randNum = randint(1111, 9999)
# put the random number in the list
succ.append(str(randNum))
# loops through 40 times for writing to file
for x in xrange(0,40):
# makes sure it isn't the last line to be written
# to write a new line char
if x != 39:
f.write(succ[x] + "\n")
else:
# if it is the last line to be written
# don't write a new line char
f.write(succ[x])
#close the file
f.close()
|
tizz98/cs216
|
p11/rand.py
|
Python
|
unlicense
| 1,083 | 0.025854 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ZipFile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ZipFile, self).__init__(temboo_session, '/Library/Box/Files/ZipFile')
def new_input_set(self):
return ZipFileInputSet()
def _make_result_set(self, result, path):
return ZipFileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ZipFileChoreographyExecution(session, exec_id, path)
class ZipFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ZipFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(ZipFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(ZipFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to zip.)
"""
super(ZipFileInputSet, self)._set_input('FileID', value)
def set_SharedLink(self, value):
"""
        Set the value of the SharedLink input for this Choreo. ((conditional, json) A JSON object representing the item's shared link and associated permissions. See documentation for formatting examples.)
"""
super(ZipFileInputSet, self)._set_input('SharedLink', value)
def set_ZipFileLocation(self, value):
"""
Set the value of the ZipFileLocation input for this Choreo. ((conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileLocation', value)
def set_ZipFileName(self, value):
"""
Set the value of the ZipFileName input for this Choreo. ((required, string) The name of the zip file that will be created.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ZipFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Box. This contains the newly created zip file metadata.)
"""
return self._output.get('Response', None)
def get_URL(self):
"""
Retrieve the value for the "URL" output from this Choreo execution. ((string) The url for the newly created zip file.)
"""
return self._output.get('URL', None)
class ZipFileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ZipFileResultSet(response, path)
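# --- Illustrative usage (editor's sketch, not part of the generated module) ---
# The session import path and execute_with_results() call are assumptions based
# on the standard Temboo Python SDK and may differ between SDK versions.
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT", "APP_NAME", "APP_KEY")
#   choreo = ZipFile(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken("<box-oauth-token>")
#   inputs.set_FileID("12345")
#   inputs.set_ZipFileName("archive.zip")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_URL())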
|
jordanemedlock/psychtruths
|
temboo/core/Library/Box/Files/ZipFile.py
|
Python
|
apache-2.0
| 4,646 | 0.005166 |
import os
import platform
from setuptools import setup
# "import" __version__
__version__ = 'unknown'
for line in open('sounddevice.py'):
if line.startswith('__version__'):
exec(line)
break
MACOSX_VERSIONS = '.'.join([
'macosx_10_6_x86_64', # for compatibility with pip < v21
'macosx_10_6_universal2',
])
# environment variables for cross-platform package creation
system = os.environ.get('PYTHON_SOUNDDEVICE_PLATFORM', platform.system())
architecture0 = os.environ.get('PYTHON_SOUNDDEVICE_ARCHITECTURE',
platform.architecture()[0])
if system == 'Darwin':
libname = 'libportaudio.dylib'
elif system == 'Windows':
libname = 'libportaudio' + architecture0 + '.dll'
else:
libname = None
if libname and os.path.isdir('_sounddevice_data/portaudio-binaries'):
packages = ['_sounddevice_data']
package_data = {'_sounddevice_data': ['portaudio-binaries/' + libname,
'portaudio-binaries/README.md']}
zip_safe = False
else:
packages = None
package_data = None
zip_safe = True
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
cmdclass = {}
else:
class bdist_wheel_half_pure(bdist_wheel):
"""Create OS-dependent, but Python-independent wheels."""
def get_tag(self):
if system == 'Darwin':
oses = MACOSX_VERSIONS
elif system == 'Windows':
if architecture0 == '32bit':
oses = 'win32'
else:
oses = 'win_amd64'
else:
oses = 'any'
return 'py3', 'none', oses
cmdclass = {'bdist_wheel': bdist_wheel_half_pure}
setup(
name='sounddevice',
version=__version__,
py_modules=['sounddevice'],
packages=packages,
package_data=package_data,
zip_safe=zip_safe,
python_requires='>=3',
setup_requires=['CFFI>=1.0'],
install_requires=['CFFI>=1.0'],
extras_require={'NumPy': ['NumPy']},
cffi_modules=['sounddevice_build.py:ffibuilder'],
author='Matthias Geier',
author_email='Matthias.Geier@gmail.com',
description='Play and Record Sound with Python',
long_description=open('README.rst').read(),
license='MIT',
keywords='sound audio PortAudio play record playrec'.split(),
url='http://python-sounddevice.readthedocs.io/',
platforms='any',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Sound/Audio',
],
cmdclass=cmdclass,
)
|
spatialaudio/python-sounddevice
|
setup.py
|
Python
|
mit
| 2,712 | 0 |
#!/usr/bin/env python
"""The MySQL database methods for foreman rule handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules
from grr_response_server.databases import mysql_utils
class MySQLDBForemanRulesMixin(object):
"""MySQLDB mixin for foreman rules related functions."""
@mysql_utils.WithTransaction()
def WriteForemanRule(self, rule, cursor=None):
"""Writes a foreman rule to the database."""
query = ("INSERT INTO foreman_rules "
" (hunt_id, expiration_time, rule) "
"VALUES (%s, FROM_UNIXTIME(%s), %s) "
"ON DUPLICATE KEY UPDATE "
" expiration_time=FROM_UNIXTIME(%s), rule=%s")
    exp_str = mysql_utils.RDFDatetimeToTimestamp(rule.expiration_time)
rule_str = rule.SerializeToBytes()
cursor.execute(query, [rule.hunt_id, exp_str, rule_str, exp_str, rule_str])
@mysql_utils.WithTransaction()
def RemoveForemanRule(self, hunt_id, cursor=None):
query = "DELETE FROM foreman_rules WHERE hunt_id=%s"
cursor.execute(query, [hunt_id])
@mysql_utils.WithTransaction(readonly=True)
def ReadAllForemanRules(self, cursor=None):
cursor.execute("SELECT rule FROM foreman_rules")
res = []
for rule, in cursor.fetchall():
res.append(foreman_rules.ForemanCondition.FromSerializedBytes(rule))
return res
@mysql_utils.WithTransaction()
def RemoveExpiredForemanRules(self, cursor=None):
now = rdfvalue.RDFDatetime.Now()
cursor.execute(
"DELETE FROM foreman_rules WHERE expiration_time < FROM_UNIXTIME(%s)",
[mysql_utils.RDFDatetimeToTimestamp(now)])
|
demonchild2112/travis-test
|
grr/server/grr_response_server/databases/mysql_foreman_rules.py
|
Python
|
apache-2.0
| 1,746 | 0.005727 |
import traceback
from vstruct2.compat import int2bytes, bytes2int
# This routine was copied from vivisect to allow vstruct
# to be free from dependencies
MAX_WORD = 16
def initmask(bits):
return (1<<bits)-1
bitmasks = [ initmask(i) for i in range(MAX_WORD*8) ]
def bitmask(value,bits):
return value & bitmasks[bits]
class v_base:
'''
Base class for all VStruct types
'''
def __init__(self):
self._vs_onset = []
self._vs_isprim = True
def __len__(self):
return self.vsSize()
def __bytes__(self):
return self.vsEmit()
def vsOnset(self, callback, *args, **kwargs):
'''
        Trigger a callback when the field's value is updated.
NOTE: this callback is triggered during parse() as well
as during value updates.
'''
self._vs_onset.append( (callback,args,kwargs) )
return self
def _fire_onset(self):
for cb,args,kwargs in self._vs_onset:
try:
cb(*args,**kwargs)
except Exception as e:
traceback.print_exc()
class v_prim(v_base):
'''
Base class for all vstruct primitive types
'''
def __init__(self, size=None, valu=None):
v_base.__init__(self)
self._vs_size = size
self._vs_bits = size * 8
self._vs_value = self._prim_norm(valu)
self._vs_parent = None
# on-demand field parsing
self._vs_backfd = None
self._vs_backoff = None
self._vs_backbytes = None
self._vs_writeback = False
def __repr__(self):
return repr(self._prim_getval())
def vsGetTypeName(self):
return self.__class__.__name__
def vsParse(self, bytez, offset=0, writeback=False):
'''
Byte parsing method for VStruct primitives.
'''
self._vs_value = None
self._vs_backoff = offset
self._vs_backbytes = bytez
self._vs_writeback = writeback
retval = offset + self.vsSize()
self._fire_onset()
return retval
def vsLoad(self, fd, offset=0, writeback=False):
self._vs_value = None
self._vs_backfd = fd
self._vs_backoff = offset
self._vs_writeback = writeback
retval = offset + self.vsSize()
self._fire_onset()
return retval
def vsSize(self):
'''
Return the size of the field.
'''
return self._vs_size
def vsResize(self, size):
'''
Resizing callback which can dynamically change the size
of a primitive.
'''
self._vs_size = size
def _prim_setval(self, newval):
valu = self._prim_norm(newval)
self._vs_value = valu
# if requested, write changes back to bytearray / fd
if self._vs_writeback:
byts = self._prim_emit(valu)
if self._vs_backbytes != None:
self._vs_backbytes[ self._vs_backoff:self._vs_backoff + len(byts) ] = byts
if self._vs_backfd != None:
self._vs_backfd.seek( self._vs_backoff )
self._vs_backfd.write( byts )
self._fire_onset()
def _prim_getval(self):
# trigger on-demand parsing if needed
if self._vs_value == None:
if self._vs_backfd:
self._vs_value = self._prim_load(self._vs_backfd, self._vs_backoff)
elif self._vs_backbytes:
self._vs_value = self._prim_parse(self._vs_backbytes, self._vs_backoff)
return self._vs_value
def _prim_load(self, fd, offset):
# easy base case...
fd.seek(offset)
byts = fd.read(self._vs_size)
return self._prim_parse(byts, 0)
def vsEmit(self):
return self._prim_emit( self._prim_getval() )
def _prim_norm(self, x):
raise Exception('Implement Me')
def _prim_emit(self, x):
raise Exception('Implement Me')
def _prim_parse(self, bytez, offset):
raise Exception('Implement Me')
class v_int(v_prim):
def __init__(self,valu=0,size=4,endian='little',signed=False,enum=None):
v_prim.__init__(self,valu=valu,size=size)
self._vs_enum = enum
self._vs_endian = endian
self._vs_signed = signed
def __int__(self):
return self._prim_getval()
def __repr__(self):
valu = self._prim_getval()
if self._vs_enum != None:
enum = self._vs_enum[valu]
if enum != None:
return enum
return repr(valu)
def vsResize(self, size):
self._vs_bits = size * 8
return v_prim.vsResize(self,size)
def _prim_emit(self,x):
return int2bytes(x, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed)
def _prim_norm(self,x):
return bitmask(x,self._vs_bits)
def _prim_parse(self, byts, offset):
return bytes2int(byts, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed, off=offset)
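# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Exercises only the primitive API above and assumes int2bytes/bytes2int from
# vstruct2.compat behave like int.to_bytes/int.from_bytes.
if __name__ == '__main__':
    v = v_int(size=4, endian='little')
    v.vsParse(bytearray(b'\x78\x56\x34\x12'))   # parsed on demand as 0x12345678
    assert int(v) == 0x12345678
    assert v.vsEmit() == b'\x78\x56\x34\x12'    # round-trips back to the same bytes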
|
vivisect/vstruct
|
vstruct2/bases.py
|
Python
|
apache-2.0
| 4,994 | 0.009211 |
import pytest
import tensorflow as tf
import numpy as np
import tfs.core.layer.ops as ops
from tfs.core.layer.dropout import Dropout
from tfs.network import Network
net = Network()
@pytest.fixture
def l():
l = Dropout(
net,
keep_prob=1.0,
)
return l
class TestDropout:
def test_build_inverse(self,l):
_in = tf.zeros([1,10,10,4])
_out=l.build(_in)
assert _out.get_shape().as_list()==[1,10,10,4]
|
crackhopper/TFS-toolbox
|
tests/core/layer/dropout_test.py
|
Python
|
mit
| 426 | 0.044601 |
# -*- coding: utf-8 -*-
__author__ = 'Forec'
import xlwt
import re
book = xlwt.Workbook(encoding = 'utf-8', style_compression=0)
sheet = book.add_sheet('student',cell_overwrite_ok = True)
line = 0
info = re.compile(r'\"(\d+)\" : \"(.*?)\"')
with open('city.txt',"r") as f:
data = f.read()
for x in info.findall(data):
for i in range(len(x)):
sheet.write(line,i,x[i])
line+=1
book.save('city.xls')
|
luoxufeiyan/python
|
Forec/0015/0015.py
|
Python
|
mit
| 410 | 0.02439 |
# Copyright (C) 2014 Optiv, Inc. (brad.spengler@optiv.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class InjectionRWX(Signature):
name = "injection_rwx"
description = "Creates RWX memory"
severity = 2
confidence = 50
categories = ["injection"]
authors = ["Optiv"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
filter_apinames = set(["NtAllocateVirtualMemory","NtProtectVirtualMemory","VirtualProtectEx"])
filter_analysistypes = set(["file"])
def on_call(self, call, process):
if call["api"] == "NtAllocateVirtualMemory" or call["api"] == "VirtualProtectEx":
protection = self.get_argument(call, "Protection")
# PAGE_EXECUTE_READWRITE
if protection == "0x00000040":
return True
elif call["api"] == "NtProtectVirtualMemory":
protection = self.get_argument(call, "NewAccessProtection")
# PAGE_EXECUTE_READWRITE
if protection == "0x00000040":
return True
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/injection_rwx.py
|
Python
|
lgpl-3.0
| 1,229 | 0.004068 |
# -*- mode: python; coding: utf-8; -*-
import os
APP_NAME = "SLog"
VERSION = "0.9.4"
WEBSITE = "http://vialinx.org"
LICENSE = """
SLog is a PyGTK-based GUI for the LightLang SL dictionary.
Copyright 2007 Nasyrov Renat <renatn@gmail.com>
This file is part of SLog.
SLog is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
SLog is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SLog; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
INSTALL_PREFIX = "@prefix@"
PIXMAP_DIR = os.path.join(INSTALL_PREFIX, "share", "pixmaps")
LOCALE_DIR = os.path.join(INSTALL_PREFIX, "share", "locale")
DATA_DIR = os.path.join(INSTALL_PREFIX, "share", "slog")
LOGO_ICON = "slog.png"
LOGO_ICON_SPY = "slog_spy.png"
#FTP_LL_URL = "ftp://ftp.lightlang.org.ru/dicts"
FTP_LL_URL = "ftp://etc.edu.ru/pub/soft/for_linux/lightlang"
FTP_DICTS_URL = FTP_LL_URL + "/dicts"
FTP_REPO_URL = FTP_DICTS_URL + "/repodata/primary.xml"
REPO_FILE = os.path.expanduser("~/.config/slog/primary.xml")
SL_TMP_DIR = "/tmp/sl"
def get_icon(filename):
return os.path.join(PIXMAP_DIR, filename)
|
mdevaev/slog
|
src/common.py
|
Python
|
gpl-2.0
| 1,483 | 0.002697 |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 User action implementations"""
import copy
import six
import sys
from keystoneauth1 import exceptions as ks_exc
from eclcli.common import command
from eclcli.common import utils
from eclcli.i18n import _ # noqa
from eclcli.identity import common
class CreateUser(command.ShowOne):
"""Create new user"""
def get_parser(self, prog_name):
parser = super(CreateUser, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<name>',
help='New user name',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Default domain (name or ID)',
)
parser.add_argument(
'--project',
metavar='<project>',
help='Default project (name or ID)',
)
common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--password',
metavar='<password>',
help='Set user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<email-address>',
help='Set user email address',
)
parser.add_argument(
'--description',
metavar='<description>',
help='User description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable user',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing user'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project_id = None
if parsed_args.project:
project_id = common.find_project(identity_client,
parsed_args.project,
parsed_args.project_domain).id
domain_id = None
if parsed_args.domain:
domain_id = common.find_domain(identity_client,
parsed_args.domain).id
enabled = True
if parsed_args.disable:
enabled = False
if parsed_args.password_prompt:
parsed_args.password = utils.get_password(self.app.stdin)
try:
user = identity_client.users.create(
name=parsed_args.name,
domain=domain_id,
default_project=project_id,
password=parsed_args.password,
email=parsed_args.email,
description=parsed_args.description,
enabled=enabled
)
except ks_exc.Conflict as e:
if parsed_args.or_show:
user = utils.find_resource(identity_client.users,
parsed_args.name,
domain_id=domain_id)
self.log.info('Returning existing user %s', user.name)
else:
raise e
user._info.pop('links')
return zip(*sorted(six.iteritems(user._info)))
class DeleteUser(command.Command):
"""Delete user(s)"""
def get_parser(self, prog_name):
parser = super(DeleteUser, self).get_parser(prog_name)
parser.add_argument(
'users',
metavar='<user>',
nargs="+",
help='User(s) to delete (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <user> (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
for user in parsed_args.users:
if domain is not None:
user_obj = utils.find_resource(identity_client.users,
user,
domain_id=domain.id)
else:
user_obj = utils.find_resource(identity_client.users,
user)
identity_client.users.delete(user_obj.id)
class ListUser(command.Lister):
"""List users"""
def get_parser(self, prog_name):
parser = super(ListUser, self).get_parser(prog_name)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Filter users by <domain> (name or ID)',
)
project_or_group = parser.add_mutually_exclusive_group()
project_or_group.add_argument(
'--group',
metavar='<group>',
help='Filter users by <group> membership (name or ID)',
)
project_or_group.add_argument(
'--project',
metavar='<project>',
help='Filter users by <project> (name or ID)',
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client,
parsed_args.domain).id
group = None
if parsed_args.group:
group = common.find_group(identity_client,
parsed_args.group,
parsed_args.domain).id
if parsed_args.project:
if domain is not None:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
domain_id=domain
).id
else:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
).id
assignments = identity_client.role_assignments.list(
project=project)
# NOTE(stevemar): If a user has more than one role on a project
# then they will have two entries in the returned data. Since we
# are looking for any role, let's just track unique user IDs.
user_ids = set()
for assignment in assignments:
if hasattr(assignment, 'user'):
user_ids.add(assignment.user['id'])
# NOTE(stevemar): Call find_resource once we have unique IDs, so
# it's fewer trips to the Identity API, then collect the data.
data = []
for user_id in user_ids:
user = utils.find_resource(identity_client.users, user_id)
data.append(user)
else:
data = identity_client.users.list(
domain=domain,
group=group,
)
# Column handling
if parsed_args.long:
columns = ['ID', 'Name', 'Default Project Id', 'Domain Id',
'Description', 'Email', 'Enabled']
column_headers = copy.deepcopy(columns)
column_headers[2] = 'Project'
column_headers[3] = 'Domain'
else:
columns = ['ID', 'Name']
column_headers = columns
return (
column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data)
)
class SetUser(command.Command):
"""Set user properties"""
def get_parser(self, prog_name):
parser = super(SetUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to change (name or ID)',
)
parser.add_argument(
'--name',
metavar='<name>',
help='Set user name',
)
parser.add_argument(
'--project',
metavar='<project>',
help='Set default project (name or ID)',
)
common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--password',
metavar='<password>',
help='Set user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<email-address>',
help='Set user email address',
)
parser.add_argument(
'--description',
metavar='<description>',
help='Set user description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable user',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.password_prompt:
parsed_args.password = utils.get_password(self.app.stdin)
        if (not parsed_args.name
and not parsed_args.password
and not parsed_args.email
and not parsed_args.project
and not parsed_args.description
and not parsed_args.enable
and not parsed_args.disable):
sys.stderr.write("Incorrect set of arguments "
"provided. See ecl --help for more "
"details\n")
return
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
kwargs = {}
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.email:
kwargs['email'] = parsed_args.email
if parsed_args.password:
kwargs['password'] = parsed_args.password
if parsed_args.description:
kwargs['description'] = parsed_args.description
if parsed_args.project:
project_id = common.find_project(identity_client,
parsed_args.project,
parsed_args.project_domain).id
kwargs['default_project'] = project_id
kwargs['enabled'] = user.enabled
if parsed_args.enable:
kwargs['enabled'] = True
if parsed_args.disable:
kwargs['enabled'] = False
identity_client.users.update(user.id, **kwargs)
class SetPasswordUser(command.Command):
"""Change current user password"""
def get_parser(self, prog_name):
parser = super(SetPasswordUser, self).get_parser(prog_name)
parser.add_argument(
'--password',
metavar='<new-password>',
help='New user password'
)
parser.add_argument(
'--original-password',
metavar='<original-password>',
help='Original user password'
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
# FIXME(gyee): there are two scenarios:
#
# 1. user update password for himself
# 2. admin update password on behalf of the user. This is an unlikely
# scenario because that will require admin knowing the user's
# original password which is forbidden under most security
# policies.
#
# Of the two scenarios above, user either authenticate using its
# original password or an authentication token. For scenario #1,
# if user is authenticating with its original password (i.e. passing
# --os-password argument), we can just make use of it instead of using
# --original-password or prompting. For scenario #2, admin will need
# to specify --original-password option or this won't work because
# --os-password is the admin's own password. In the future if we stop
# supporting scenario #2 then we can just do this.
#
# current_password = (parsed_args.original_password or
# self.app.cloud.password)
#
current_password = parsed_args.original_password
if current_password is None:
current_password = utils.get_password(
self.app.stdin, prompt="Current Password:", confirm=False)
password = parsed_args.password
if password is None:
password = utils.get_password(
self.app.stdin, prompt="New Password:")
identity_client.users.update_password(current_password, password)
class ShowUser(command.ShowOne):
"""Display user details"""
def get_parser(self, prog_name):
parser = super(ShowUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to display (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <user> (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
user = utils.find_resource(identity_client.users,
parsed_args.user,
domain_id=domain.id)
else:
user = utils.find_resource(identity_client.users,
parsed_args.user)
user._info.pop('links')
return zip(*sorted(six.iteritems(user._info)))
|
nttcom/eclcli
|
eclcli/identity/v3/user.py
|
Python
|
apache-2.0
| 15,378 | 0 |
from temboo.Library.Zendesk.Search.SearchAll import SearchAll, SearchAllInputSet, SearchAllResultSet, SearchAllChoreographyExecution
from temboo.Library.Zendesk.Search.SearchAnonymous import SearchAnonymous, SearchAnonymousInputSet, SearchAnonymousResultSet, SearchAnonymousChoreographyExecution
|
jordanemedlock/psychtruths
|
temboo/core/Library/Zendesk/Search/__init__.py
|
Python
|
apache-2.0
| 296 | 0.006757 |
import random
## Course texture colors ##
###########################
class Course(object):
def __init__(self, num):
## Default colors, fall back to these
fog = [0,0,0]
light_road = [0,0,0]
dark_road = [0,0,0]
light_offroad = [0,0,0]
dark_offroad = [0,0,0]
light_wall = [0,0,0]
dark_wall = [0,0,0]
light_rumble = [0,0,0]
dark_rumble = [0,0,0]
## Course road geometry
self.geometry = [0,0,0,0,0,0,0,0]
last_seg = 0
## Start with a straightaway by default
        ## Seven "segments" are filled in (the eighth keeps its default value)
for i in range(7):
## Add a segment that's different from the previous one
self.geometry[i] = last_seg
last_seg += random.choice((-1,1))
if last_seg < 1:
last_seg = 3
elif last_seg > 3:
last_seg = 1
## Length of each segment, larger number = longer strip
self.strip = 3 ## Wall
self.road = 2 ## Road
## Load texture colors to overwrite defaults
f = open("res/stage/%d.dat" %num, "r").readlines()
for line in f:
if line.startswith("fog = "): ## Fog color to fade into
temp = line.strip("fog = ").split(",")
fog = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_road = "): ## Light ground strip
temp = line.strip("light_road = ").split(",")
light_road = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_road = "): ## Dark ground strip
temp = line.strip("dark_road = ").split(",")
dark_road = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_offroad = "): ## Light offroad strip
temp = line.strip("light_offroad = ").split(",")
light_offroad = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_offroad = "): ## Dark offroad strip
temp = line.strip("dark_offroad = ").split(",")
dark_offroad = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_wall = "): ## Light wall strip
temp = line.strip("light_wall = ").split(",")
light_wall = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_wall = "): ## Dark wall strip
temp = line.strip("dark_wall = ").split(",")
dark_wall = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_rumble = "): ## Light rumble strip
temp = line.strip("light_rumble = ").split(",")
light_rumble = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_rumble = "): ## Dark rumble strip
temp = line.strip("dark_rumble = ").split(",")
dark_rumble = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("ceiling = "): ## Ceiling
temp = line.strip("ceiling = ").split(",")
ceiling = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("strip = "): ## Length of wall segment
self.strip = int(line.strip("strip = "))
elif line.startswith("road = "): ## Length of road segment
self.road = int(line.strip("road = "))
## Define start line, finish line, and dark and light strip lists
white = [200,200,200]
self.start = [white,white,light_wall, white, fog, white]
self.finish = [white, white, dark_wall, white, fog, white]
self.dark_colors = [dark_road, dark_offroad, dark_wall, dark_rumble, fog, None, ceiling]
self.light_colors = [light_road, light_offroad, light_wall, light_rumble, fog, white, ceiling]
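# Illustrative sketch (an assumption, not part of the original game): loading
# stage 1 and inspecting the generated track. "res/stage/1.dat" must exist and
# contain the "key = r,g,b" lines parsed above for this to run.
#   course = Course(1)
#   print course.geometry          ## e.g. [0, 1, 2, 1, 2, 3, 2, 0]
#   print course.strip, course.road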
|
navarro0/racing-all-afternoon
|
course.py
|
Python
|
mit
| 3,892 | 0.023124 |
"""
End to end test of the internal reporting user table loading task.
"""
import os
import logging
import datetime
import pandas
from luigi.date_interval import Date
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase
from edx.analytics.tasks.url import url_path_join
log = logging.getLogger(__name__)
class InternalReportingUserLoadAcceptanceTest(AcceptanceTestCase):
"""End-to-end test of the workflow to load the internal reporting warehouse's user table."""
INPUT_FILE = 'location_by_course_tracking.log'
DATE_INTERVAL = Date(2014, 7, 21)
def setUp(self):
super(InternalReportingUserLoadAcceptanceTest, self).setUp()
# Set up the mock LMS databases.
self.execute_sql_fixture_file('load_auth_user_for_internal_reporting_user.sql')
self.execute_sql_fixture_file('load_auth_userprofile.sql')
# Put up the mock tracking log for user locations.
self.upload_tracking_log(self.INPUT_FILE, datetime.datetime(2014, 7, 21))
def test_internal_reporting_user(self):
"""Tests the workflow for the internal reporting user table, end to end."""
self.task.launch([
'LoadInternalReportingUserToWarehouse',
'--interval', self.DATE_INTERVAL.to_string(),
'--user-country-output', url_path_join(self.test_out, 'user'),
'--n-reduce-tasks', str(self.NUM_REDUCERS),
])
self.validate_output()
def validate_output(self):
"""Validates the output, comparing it to a csv of all the expected output from this workflow."""
with self.vertica.cursor() as cursor:
expected_output_csv = os.path.join(self.data_dir, 'output', 'acceptance_expected_d_user.csv')
expected = pandas.read_csv(expected_output_csv, parse_dates=True)
cursor.execute("SELECT * FROM {schema}.d_user".format(schema=self.vertica.schema_name))
response = cursor.fetchall()
d_user = pandas.DataFrame(response, columns=['user_id', 'user_year_of_birth', 'user_level_of_education',
'user_gender', 'user_email', 'user_username',
'user_account_creation_time',
'user_last_location_country_code'])
try: # A ValueError will be thrown if the column names don't match or the two data frames are not square.
self.assertTrue(all(d_user == expected))
except ValueError:
self.fail("Expected and returned data frames have different shapes or labels.")
|
sssllliang/edx-analytics-pipeline
|
edx/analytics/tasks/tests/acceptance/test_internal_reporting_user.py
|
Python
|
agpl-3.0
| 2,665 | 0.004878 |
# -*- coding: utf-8 -*-
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# default settings that can be made for a user.
from __future__ import unicode_literals
import frappe
# product_name = "ERPNext"
product_name = "letzERP"
user_defaults = {
"Company": "company",
"Territory": "territory"
}
|
gangadharkadam/letzerp
|
erpnext/startup/__init__.py
|
Python
|
agpl-3.0
| 986 | 0.002028 |
__author__ = 'Sulantha'
import logging
class PipelineLogger:
    LOG_LEVELS = ('info', 'debug', 'warning', 'error', 'critical', 'exception')
    @staticmethod
    def log(moduleName, level, message):
        level = level.lower()
        if level not in PipelineLogger.LOG_LEVELS:
            level = 'info'
        # Dispatch through the logger named after the module so records are
        # attributed to the calling module instead of the root logger.
        logger = logging.getLogger(moduleName)
        getattr(logger, level)(message)
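# Minimal usage sketch (the module name and messages are hypothetical; logging
# must be configured elsewhere, e.g. via logging.basicConfig, for the records
# to be emitted anywhere):
#   PipelineLogger.log('Recon.Registration', 'info', 'Registration started')
#   PipelineLogger.log('Recon.Registration', 'error', 'Registration failed')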
|
sulantha2006/Processing_Pipeline
|
Utils/PipelineLogger.py
|
Python
|
apache-2.0
| 498 | 0.026104 |
import py
from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec
from rpython.rlib import types
from rpython.annotator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
t = TranslationContext()
t.config.translation.check_str_without_nul = True
a = t.buildannotator(policy=policy)
a.annotate_helper(f, [model.s_ImpossibleValue]*f.func_code.co_argcount, policy=policy)
return a
def sigof(a, f):
# returns [param1, param2, ..., ret]
g = graphof(a.translator, f)
return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]
def getsig(f, policy=None):
a = annotate_at(f, policy=policy)
return sigof(a, f)
def check_annotator_fails(caller):
exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
assert caller.func_name in str(exc)
def test_bookkeeping():
@signature('x', 'y', returns='z')
def f(a, b):
return a + len(b)
f.foo = 'foo'
assert f._signature_ == (('x', 'y'), 'z')
assert f.func_name == 'f'
assert f.foo == 'foo'
assert f(1, 'hello') == 6
def test_basic():
@signature(types.int(), types.str(), returns=types.char())
def f(a, b):
return b[a]
assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
@signature(types.int(), types.str(), returns=types.int())
def f(a, b):
return a + len(b)
@check_annotator_fails
def ok_for_body(): # would give no error without signature
f(2.0, 'b')
@check_annotator_fails
def bad_for_body(): # would give error inside 'f' body, instead errors at call
f('a', 'b')
def test_return():
@signature(returns=types.str())
def f():
return 'a'
assert getsig(f) == [model.SomeString()]
@signature(types.str(), returns=types.str())
def f(x):
return x
def g():
return f('a')
a = annotate_at(g)
assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
@check_annotator_fails
@signature(returns=types.int())
def int_not_char():
return 'a'
@check_annotator_fails
@signature(types.str(), returns=types.int())
def str_to_int(s):
return s
@signature(returns=types.str())
def str_not_None():
return None
@check_annotator_fails
def caller_of_str_not_None():
return str_not_None()
@py.test.mark.xfail
def test_return_errors_xfail():
@check_annotator_fails
@signature(returns=types.str())
def str_not_None():
return None
def test_none():
@signature(returns=types.none())
def f():
pass
assert getsig(f) == [model.s_None]
def test_float():
@signature(types.longfloat(), types.singlefloat(), returns=types.float())
def f(a, b):
return 3.0
assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]
def test_unicode():
@signature(types.unicode(), returns=types.int())
def f(u):
return len(u)
assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]
def test_str0():
@signature(types.unicode0(), returns=types.str0())
def f(u):
return 'str'
assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
model.SomeString(no_nul=True)]
def test_ptr():
policy = LowLevelAnnotatorPolicy()
@signature(types.ptr(rstr.STR), returns=types.none())
def f(buf):
pass
argtype = getsig(f, policy=policy)[0]
assert isinstance(argtype, SomePtr)
assert argtype.ll_ptrtype.TO == rstr.STR
def g():
f(rstr.mallocstr(10))
getsig(g, policy=policy)
def test_list():
@signature(types.list(types.int()), returns=types.int())
def f(a):
return len(a)
argtype = getsig(f)[0]
assert isinstance(argtype, model.SomeList)
item = argtype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == True
@check_annotator_fails
def ok_for_body():
f(['a'])
@check_annotator_fails
def bad_for_body():
f('a')
@signature(returns=types.list(types.char()))
def ff():
return ['a']
@check_annotator_fails
def mutate_broader():
ff()[0] = 'abc'
@check_annotator_fails
def mutate_unrelated():
ff()[0] = 1
@check_annotator_fails
@signature(types.list(types.char()), returns=types.int())
def mutate_in_body(l):
l[0] = 'abc'
return len(l)
def can_append():
l = ff()
l.append('b')
getsig(can_append)
def test_array():
@signature(returns=types.array(types.int()))
def f():
return [1]
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeList)
item = rettype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == False
def try_append():
l = f()
l.append(2)
check_annotator_fails(try_append)
def test_dict():
@signature(returns=types.dict(types.str(), types.int()))
def f():
return {'a': 1, 'b': 2}
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeDict)
assert rettype.dictdef.dictkey.s_value == model.SomeString()
assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3), returns=types.instance(C2))
def f(x):
assert isinstance(x, C2)
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
@check_annotator_fails
def ok_for_body():
f(None)
def test_instance_or_none():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
def f(x):
assert isinstance(x, C2) or x is None
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert argtype.can_be_None
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
assert rettype.can_be_None
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
def test_self():
@finishsigs
class C(object):
@signature(types.self(), types.self(), returns=types.none())
def f(self, other):
pass
class D1(C):
pass
class D2(C):
pass
def g():
D1().f(D2())
a = annotate_at(g)
argtype = sigof(a, C.__dict__['f'])[0]
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
class C(object):
@signature(types.self(), returns=types.none())
def incomplete_sig_meth(self):
pass
exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
assert 'incomplete_sig_meth' in str(exc)
assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
sig = getsig(g)
assert sig == [model.SomeFloat(), model.SomeFloat()]
@signature(types.str(), returns=types.int())
def cannot_add_string(x):
return f(x, 2)
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
def test_return_any():
@signature(types.int(), returns=types.any())
def f(x):
return x
sig = getsig(f)
assert sig == [model.SomeInteger(), model.SomeInteger()]
@signature(types.str(), returns=types.any())
def cannot_add_string(x):
return f(3) + x
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
assert 'cannot_add_string' in str(exc)
@py.test.mark.xfail
def test_class_basic():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
def wrong_type():
c = C()
c.x = 'a'
check_annotator_fails(wrong_type)
def bad_field():
c = C()
c.y = 3
check_annotator_fails(bad_field)
@py.test.mark.xfail
def test_class_shorthand():
class C1(object):
_fields_ = {'x': FieldSpec(types.int)}
def wrong_type_1():
c = C1()
c.x = 'a'
check_annotator_fails(wrong_type_1)
class C2(object):
_fields_ = ClassSpec({'x': types.int})
def wrong_type_2():
c = C2()
c.x = 'a'
    check_annotator_fails(wrong_type_2)
@py.test.mark.xfail
def test_class_inherit():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
class C1(object):
_fields_ = ClassSpec({'y': FieldSpec(types.int)})
class C2(object):
_fields_ = ClassSpec({'y': FieldSpec(types.int)}, inherit=True)
def no_inherit():
c = C1()
c.x = 3
check_annotator_fails(no_inherit)
def good():
c = C2()
c.x = 3
annotate_at(good)
def wrong_type():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type)
|
oblique-labs/pyVM
|
rpython/rlib/test/test_signature.py
|
Python
|
mit
| 10,192 | 0.005789 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sin título.py
#
# Copyright 2012 Jesús Hómez <jesus@soneview>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from modelo import Model
#Model().conectar()
#Model().borrar_tablas_para_inventario()
#~ Model().crear_tablas_para_inventario(2011,12)
#~ Model().crear_tabla_entradas()
venta_id = Model().venta_id_max()
new_venta_id = Model().nuevo_venta_id()
print venta_id
print new_venta_id
|
jehomez/pymeadmin
|
prueba_de_inventario.py
|
Python
|
gpl-2.0
| 1,191 | 0.004209 |
"""Command line tool for creating audioMD metadata."""
from __future__ import unicode_literals, print_function
import os
import sys
import click
import six
import audiomd
from siptools.mdcreator import MetsSectionCreator
from siptools.utils import fix_missing_metadata, scrape_file
click.disable_unicode_literals_warning = True
FILEDATA_KEYS = ['audio_data_encoding', 'bits_per_sample',
'data_rate', 'data_rate_mode', 'sampling_frequency']
AUDIOINFO_KEYS = ['duration', 'num_channels']
ALLOW_UNAV = ['audio_data_encoding', 'codec_creator_app',
'codec_creator_app_version', 'codec_name',
'duration', 'num_channels']
ALLOW_ZERO = ['bits_per_sample', 'data_rate', 'sampling_frequency']
@click.command()
@click.argument(
'filename', type=str)
@click.option(
'--workspace', type=click.Path(exists=True),
default='./workspace/',
metavar='<WORKSPACE PATH>',
help="Workspace directory for the metadata files. "
"Defaults to ./workspace/")
@click.option(
'--base_path', type=click.Path(exists=True), default='.',
metavar='<BASE PATH>',
help="Source base path of digital objects. If used, give path to "
"the file in relation to this base path.")
def main(filename, workspace, base_path):
"""Write audioMD metadata for an audio file or streams.
FILENAME: Relative path to the file from current directory or from
--base_path.
"""
create_audiomd(filename, workspace, base_path)
return 0
def create_audiomd(filename, workspace="./workspace/", base_path="."):
"""
Write audioMD metadata for an audio file or streams.
:filename: Audio file path relative to base path
:workspace: Workspace path
:base_path: Base path
"""
filerel = os.path.normpath(filename)
filepath = os.path.normpath(os.path.join(base_path, filename))
creator = AudiomdCreator(workspace)
creator.add_audiomd_md(filepath, filerel)
creator.write()
class AudiomdCreator(MetsSectionCreator):
"""
Subclass of MetsSectionCreator, which generates audioMD metadata for audio
files.
"""
def add_audiomd_md(self, filepath, filerel=None):
"""Create audioMD metadata for a audio file and append it
to self.md_elements.
If a file is not a video container, then the audio stream metadata is
processed in file level. Video container includes streams which need
to be processed separately one at a time.
:filepath: Audio file path
:filerel: Audio file path relative to base path
"""
# Create audioMD metadata
audiomd_dict = create_audiomd_metadata(
filepath, filerel, self.workspace
)
if '0' in audiomd_dict and len(audiomd_dict) == 1:
self.add_md(metadata=audiomd_dict['0'],
filename=(filerel if filerel else filepath))
else:
for index, audio in six.iteritems(audiomd_dict):
self.add_md(metadata=audio,
filename=(filerel if filerel else filepath),
stream=index)
# pylint: disable=too-many-arguments
def write(self, mdtype="OTHER", mdtypeversion="2.0",
othermdtype="AudioMD", section=None, stdout=False,
file_metadata_dict=None,
ref_file="create-audiomd-md-references.jsonl"):
"""
Write AudioMD metadata.
"""
super(AudiomdCreator, self).write(
mdtype=mdtype, mdtypeversion=mdtypeversion,
othermdtype=othermdtype, ref_file=ref_file
)
def create_audiomd_metadata(filename, filerel=None, workspace=None,
streams=None):
"""Creates and returns list of audioMD XML sections.
:filename: Audio file path
:filrel: Audio file path relative to base path
:workspace: Workspace path
:streams: Metadata dict of streams. Will be created if None.
:returns: Dict of AudioMD XML sections.
"""
if streams is None:
(streams, _, _) = scrape_file(filepath=filename,
filerel=filerel,
workspace=workspace,
skip_well_check=True)
fix_missing_metadata(streams, filename, ALLOW_UNAV, ALLOW_ZERO)
audiomd_dict = {}
for index, stream_md in six.iteritems(streams):
if stream_md['stream_type'] != 'audio':
continue
stream_md = _fix_data_rate(stream_md)
file_data_elem = _get_file_data(stream_md)
audio_info_elem = _get_audio_info(stream_md)
audiomd_elem = audiomd.create_audiomd(
file_data=file_data_elem,
audio_info=audio_info_elem
)
audiomd_dict[six.text_type(index)] = audiomd_elem
if not audiomd_dict:
print('The file has no audio streams. No AudioMD metadata created.')
return None
return audiomd_dict
def _fix_data_rate(stream_dict):
"""Changes the data_rate to an integer if it is of a
float type by rounding the number. The value is saved as
a string in the dictionary.
:stream_dict: Metadata dict of a stream
:returns: Fixed metadata dict
"""
for key in stream_dict:
if key == 'data_rate':
data_rate = stream_dict[key]
if data_rate:
try:
data_rate = float(data_rate)
stream_dict['data_rate'] = six.text_type(int(round(data_rate)))
except ValueError:
pass
return stream_dict
def _get_file_data(stream_dict):
"""Creates and returns the fileData XML element.
:stream_dict: Metadata dict of a stream
:returns: AudioMD fileData element
"""
params = {}
for key in FILEDATA_KEYS:
keyparts = key.split('_')
camel_key = keyparts[0] + ''.join(x.title() for x in keyparts[1:])
params[camel_key] = stream_dict[key]
compression = (stream_dict['codec_creator_app'],
stream_dict['codec_creator_app_version'],
stream_dict['codec_name'],
stream_dict['codec_quality'])
params['compression'] = audiomd.amd_compression(*compression)
return audiomd.amd_file_data(params)
def _get_audio_info(stream_dict):
"""Creates and returns the audioInfo XML element.
:stream_dict: Metadata dict of a stream
:returns: AudioMD audioInfo element
"""
return audiomd.amd_audio_info(
duration=stream_dict['duration'],
num_channels=stream_dict['num_channels'])
if __name__ == '__main__':
RETVAL = main() # pylint: disable=no-value-for-parameter
sys.exit(RETVAL)
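# Hedged command-line sketch (the audio path is hypothetical; option names
# follow the click declarations above):
#   python create_audiomd.py audio/track01.wav --workspace ./workspace/ --base_path .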
|
Digital-Preservation-Finland/dpres-siptools
|
siptools/scripts/create_audiomd.py
|
Python
|
lgpl-3.0
| 6,706 | 0 |
import logging
import StringIO
from iso8601 import parse_date
from datetime import datetime
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.model.base_model import appConfig
from lr.lib.base import BaseController, render
import json
import ijson
import collections, sys
import math
from urllib2 import urlopen,HTTPError
import lr.lib.helpers as h
log = logging.getLogger(__name__)
import couchdb
class ExtractController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('extract', 'extract')
def _getView(self,view='_all_docs',keys=[], includeDocs=True,startKey=None,endKey=None):
args = {'include_docs':includeDocs}
if len(keys) > 0:
args['keys'] = keys
args['reduce'] = False
args['stale'] = appConfig['couchdb.stale.flag']
if startKey is not None:
args['startkey'] = startKey
if endKey is not None:
args['endkey'] = endKey
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
view = h.getResponse(database_url=db_url,view_name=view,**args)
return view
def _convertDateTime(self, dt):
try:
epoch = parse_date("1970-01-01T00:00:00Z")
if isinstance(dt, str) or isinstance(dt,unicode):
dt = parse_date(dt)
dt = dt - epoch
return int(math.floor(dt.total_seconds()))
except:
abort(500,"Invalid Date Format")
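    # Illustrative behaviour (derived from the epoch subtraction above):
    #   self._convertDateTime("1970-01-02T00:00:00Z") -> 86400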
def _processRequest(self,startKey, endKey,urlBase, includeDocs=True):
def streamResult(resp):
CHUNK_SIZE=1024
data = resp.read(CHUNK_SIZE)
while len(data) > 0:
yield data
data = resp.read(CHUNK_SIZE)
try:
resp = self._getView(urlBase,startKey=startKey,endKey=endKey,includeDocs=includeDocs)
return streamResult(resp)
except HTTPError as ex:
abort(404, "not found")
def _orderParmaByView(self,params,view):
def makeEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
#if complex key
if isinstance(newkey, list):
# get last element in key
last = newkey[-1]
# if the last element is a list, just append an empty object to the last element's list
if isinstance(last, list):
last.append({})
# if the last element in an object, it becomes a bit tricky
# *** note that the key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# key order won't be guaranteed to be the same as what CouchDB will use!!!!
elif isinstance(last, dict):
lastkey = last.keys()[-1]
# since there's no easy way to increment a float accurately, instead append a new key that 'should' sort after the previous key.
if (isinstance(last[lastkey], float)):
last[lastkey+u'\ud7af'] = None
# if it's something else... this thing should recurse and keep going.
else:
last[lastkey] = makeEndKey(last[lastkey])
# if we got here, it's nothing really special, so we'll just append a {} to newkey
else:
newkey.append({})
# this if to handle the odd case where we have string as either the key or the value of an object in a complex key.
elif isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# integer... so just increment 1.
elif isinstance(newkey, int):
newkey += 1
# if we skipped everything else - we don't have a strategy to deal with it... so don't
return newkey
def makeStartsWithEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
# this the base case for keys that are just strings, append the funky unicode char so that it grabs everything from
# "foo" to "foo\ud7af", which is technically the only way we know how to deal with starts with.
if isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# if this is a complex key, then get the last element and recurse
elif isinstance(newkey, list):
newkey[-1] = makeStartsWithEndKey(newkey[-1])
# if the last element in an object, it becomes a bit tricky, because you must modify the last key, which implies
# order of keys was maintained when the value was originally parsed.
# *** IMPORTANT: The key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# *** key order won't be guaranteed to be the same as what CouchDB will use!!!!
            elif isinstance(newkey, dict):
                lastkey = newkey.keys()[-1]
                #take the value from the last key and recurse on it
                newkey[lastkey] = makeStartsWithEndKey(newkey[lastkey])
# if we skipped everything else - we don't have a strategy to deal with it as a Starts With key, so just return
else:
newkey = key
return newkey
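        # Rough example of the intent (assumed data, not taken from this file):
        # for a starts-with query on ["resource", "abc"], the start key stays
        # ["resource", "abc"] while the end key becomes ["resource", u"abc\ud7af"],
        # so CouchDB returns every row whose last key element starts with "abc".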
def hasParamFor(funcName):
if funcName == 'ts' and ('from' in params or 'until' in params):
return True
elif funcName == 'discriminator' and ('discriminator' in params or 'discriminator-starts-with' in params):
return True
elif funcName == 'resource' and ('resource' in params or 'resource-starts-with' in params):
return True
else:
return False
def populateTs(startKey, endKey, pos, isLast):
if 'from' in params:
startKey.append(self._convertDateTime(params['from']))
elif pos == 1:
startKey.append(self._convertDateTime(datetime.min.isoformat() + "Z"))
if 'until' in params:
endKey.append(self._convertDateTime(params['until']))
elif pos == 1:
endKey.append(self._convertDateTime(datetime.utcnow().isoformat()+"Z"))
return startKey, endKey
def populateDiscriminator(startKey, endKey, pos, isLast):
if 'discriminator' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator']
startKey.append(discriminator)
endKey.append(discriminator)
elif 'discriminator-starts-with' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator-starts-with'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator-starts-with']
startKey.append(discriminator)
endKey.append(discriminator)
endKey = makeStartsWithEndKey(endKey)
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
def populateResource(startKey, endKey, pos, isLast):
if 'resource' in params:
startKey.append(params['resource'])
endKey.append(params['resource'])
elif 'resource-starts-with' in params:
startKey.append(params['resource-starts-with'])
endKey.append(params['resource-starts-with']+u'\ud7af')
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
startKey=[]
endKey=[]
includeDocs = True
if "ids_only" in params:
            # only ids were requested, so skip fetching the full documents
            includeDocs = False
funcs = {
"discriminator":populateDiscriminator,
'resource':populateResource,
'ts':populateTs
}
queryOrderParts = view.split('-by-')
aggregate = queryOrderParts[0]
queryParams= queryOrderParts[1].split('-')
# if we don't have explicit params for this, then omit.
if hasParamFor(aggregate):
queryParams.append(aggregate)
log.error("added aggregate")
for pos, q in enumerate(queryParams,start=1):
            startKey, endKey = funcs[q](startKey, endKey, pos, len(queryParams) == pos)
if len(endKey) > 0 and 'resource-starts-with' not in params and 'discriminator-starts-with' not in params:
log.error("making endkey")
endKey = makeEndKey(endKey)
# startkey, endKey = funcs[aggregate](startKey, endKey, len(queryParams)+1, True)
return startKey if len(startKey) > 0 else None, endKey if len(endKey) > 0 else None, includeDocs
def get(self, dataservice="",view='',list=''):
"""GET /extract/id: Show a specific intem"""
try:
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
db = couchdb.Database(db_url)
dsDocument = db['_design/'+dataservice]
if "dataservices" not in dsDocument:
abort(406, "Invalid Data Service")
log.error("no dataservices element in document")
urlBase = "_design/{0}/_list/{1}/{2}".format(dataservice,list,view)
startKey, endKey,includeDocs = self._orderParmaByView(request.params,view)
return self._processRequest(startKey,endKey,urlBase,includeDocs)
except couchdb.ResourceNotFound as ex:
abort(406,"Invalid Data Service")
log.error(ex)
|
jimklo/LearningRegistry
|
LR/lr/controllers/extract.py
|
Python
|
apache-2.0
| 10,398 | 0.009233 |
# -*- coding: utf-8 -*-
__all__ = ("clear_tags", "get_text_from_html", "clear_text")
def clear_tags(obj):
    """
    Remove unused blocks, such as the table of contents and advertisements.
    """
    SEARCH_TAG = ["div", "table"]
    for i in SEARCH_TAG:
        res = obj.soup(i)
        for row in res:
            # .get() avoids a KeyError on tags that have no "align" attribute
            if row.get("align") == "right":
                row.clear()
    res = obj.soup("title")
    if len(res):
        res[0].clear()
def join_rows(obj):
    """
    Join formatted rows into paragraphs.
    Words split across line breaks would need to be re-joined;
    skipped in this solution.
    """
    pass
def get_text_from_html(obj):
"""
Return text without html tags
"""
obj.text = obj.soup.get_text()
def clear_text(obj):
"""
    Remove special/unused symbols
"""
obj.text = obj.text.replace("\t", "")
|
Korotkin/text_processor_sample
|
src/plugins.py
|
Python
|
gpl-3.0
| 821 | 0.004872 |
from django.db import models as dbmodels, connection
from django.utils import datastructures
from autotest.frontend.afe import model_logic, readonly_connection
_quote_name = connection.ops.quote_name
class TempManager(model_logic.ExtendedManager):
_GROUP_COUNT_NAME = 'group_count'
def _get_key_unless_is_function(self, field):
if '(' in field:
return field
return self.get_key_on_this_table(field)
def _get_field_names(self, fields, extra_select_fields={}):
field_names = []
for field in fields:
if field in extra_select_fields:
field_names.append(extra_select_fields[field][0])
else:
field_names.append(self._get_key_unless_is_function(field))
return field_names
def _get_group_query_sql(self, query, group_by):
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
# insert GROUP BY clause into query
group_fields = self._get_field_names(group_by, query.query.extra_select)
group_by_clause = ' GROUP BY ' + ', '.join(group_fields)
group_by_position = sql.rfind('ORDER BY')
if group_by_position == -1:
group_by_position = len(sql)
sql = (sql[:group_by_position] +
group_by_clause + ' ' +
sql[group_by_position:])
return sql, params
def _get_column_names(self, cursor):
"""
Gets the column names from the cursor description. This method exists
so that it can be mocked in the unit test for sqlite3 compatibility.
"""
return [column_info[0] for column_info in cursor.description]
def execute_group_query(self, query, group_by):
"""
Performs the given query grouped by the fields in group_by with the
given query's extra select fields added. Returns a list of dicts, where
each dict corresponds to single row and contains a key for each grouped
field as well as all of the extra select fields.
"""
sql, params = self._get_group_query_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
field_names = self._get_column_names(cursor)
row_dicts = [dict(zip(field_names, row)) for row in cursor.fetchall()]
return row_dicts
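    # Hypothetical call (field names below are invented for illustration):
    #   manager.execute_group_query(some_query, ['hostname', 'status'])
    #   -> [{'hostname': 'host1', 'status': 'GOOD', ...}, ...]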
def get_count_sql(self, query):
"""
Get the SQL to properly select a per-group count of unique matches for
a grouped query. Returns a tuple (field alias, field SQL)
"""
if query.query.distinct:
pk_field = self.get_key_on_this_table()
count_sql = 'COUNT(DISTINCT %s)' % pk_field
else:
count_sql = 'COUNT(1)'
return self._GROUP_COUNT_NAME, count_sql
def _get_num_groups_sql(self, query, group_by):
group_fields = self._get_field_names(group_by, query.query.extra_select)
query = query.order_by() # this can mess up the query and isn't needed
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
from_ = sql[sql.find(' FROM'):]
return ('SELECT DISTINCT %s %s' % (','.join(group_fields),
from_),
params)
def _cursor_rowcount(self, cursor):
"""To be stubbed by tests"""
return cursor.rowcount
def get_num_groups(self, query, group_by):
"""
Returns the number of distinct groups for the given query grouped by the
fields in group_by.
"""
sql, params = self._get_num_groups_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
return self._cursor_rowcount(cursor)
class Machine(dbmodels.Model):
'''
A machine used to run a test
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
machine_idx = dbmodels.AutoField(primary_key=True)
#: The name, such as a FQDN, of the machine that run the test. Must be
#: unique.
hostname = dbmodels.CharField(unique=True, max_length=255)
#: the machine group
machine_group = dbmodels.CharField(blank=True, max_length=240)
#: the machine owner
owner = dbmodels.CharField(blank=True, max_length=240)
class Meta:
db_table = 'tko_machines'
class Kernel(dbmodels.Model):
'''
The Linux Kernel used during a test
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
kernel_idx = dbmodels.AutoField(primary_key=True)
#: the kernel hash
kernel_hash = dbmodels.CharField(max_length=105, editable=False)
#: base
base = dbmodels.CharField(max_length=90)
#: printable
printable = dbmodels.CharField(max_length=300)
class Meta:
db_table = 'tko_kernels'
class Patch(dbmodels.Model):
'''
A Patch applied to a Linux Kernel source during the build process
'''
#: A reference to a :class:`Kernel`
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
#: A descriptive name for the patch
name = dbmodels.CharField(blank=True, max_length=240)
#: The URL where the patch was fetched from
url = dbmodels.CharField(blank=True, max_length=900)
#: hash
the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')
class Meta:
db_table = 'tko_patches'
class Status(dbmodels.Model):
'''
The possible results of a test
These objects are populated automatically from a
:ref:`fixture file <django:initial-data-via-fixtures>`
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
status_idx = dbmodels.AutoField(primary_key=True)
#: A short descriptive name for the status. This exact name is searched for
#: while the TKO parser is reading and parsing status files
word = dbmodels.CharField(max_length=30)
class Meta:
db_table = 'tko_status'
class Job(dbmodels.Model, model_logic.ModelExtensions):
"""
A test job, having one or many tests an their results
"""
job_idx = dbmodels.AutoField(primary_key=True)
tag = dbmodels.CharField(unique=True, max_length=100)
label = dbmodels.CharField(max_length=300)
username = dbmodels.CharField(max_length=240)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
queued_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
finished_time = dbmodels.DateTimeField(null=True, blank=True)
#: If this job was scheduled through the AFE application, this points
#: to the related :class:`autotest.frontend.afe.models.Job` object
afe_job_id = dbmodels.IntegerField(null=True, default=None)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_jobs'
class JobKeyval(dbmodels.Model):
job = dbmodels.ForeignKey(Job)
key = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
class Meta:
db_table = 'tko_job_keyvals'
class Test(dbmodels.Model, model_logic.ModelExtensions,
model_logic.ModelWithAttributes):
test_idx = dbmodels.AutoField(primary_key=True)
job = dbmodels.ForeignKey(Job, db_column='job_idx')
test = dbmodels.CharField(max_length=300)
subdir = dbmodels.CharField(blank=True, max_length=300)
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
status = dbmodels.ForeignKey(Status, db_column='status')
reason = dbmodels.CharField(blank=True, max_length=3072)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
finished_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
objects = model_logic.ExtendedManager()
def _get_attribute_model_and_args(self, attribute):
return TestAttribute, dict(test=self, attribute=attribute,
user_created=True)
def set_attribute(self, attribute, value):
# ensure non-user-created attributes remain immutable
try:
TestAttribute.objects.get(test=self, attribute=attribute,
user_created=False)
raise ValueError('Attribute %s already exists for test %s and is '
'immutable' % (attribute, self.test_idx))
except TestAttribute.DoesNotExist:
super(Test, self).set_attribute(attribute, value)
class Meta:
db_table = 'tko_tests'
class TestAttribute(dbmodels.Model, model_logic.ModelExtensions):
test = dbmodels.ForeignKey(Test, db_column='test_idx')
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
user_created = dbmodels.BooleanField(default=False)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_test_attributes'
class IterationAttribute(dbmodels.Model, model_logic.ModelExtensions):
# this isn't really a primary key, but it's necessary to appease Django
# and is harmless as long as we're careful
test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
iteration = dbmodels.IntegerField()
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_iteration_attributes'
class IterationResult(dbmodels.Model, model_logic.ModelExtensions):
# see comment on IterationAttribute regarding primary_key=True
test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
iteration = dbmodels.IntegerField()
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.FloatField(null=True, blank=True)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_iteration_result'
class TestLabel(dbmodels.Model, model_logic.ModelExtensions):
name = dbmodels.CharField(max_length=80, unique=True)
description = dbmodels.TextField(blank=True)
tests = dbmodels.ManyToManyField(Test, blank=True,
db_table='tko_test_labels_tests')
name_field = 'name'
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_test_labels'
class SavedQuery(dbmodels.Model, model_logic.ModelExtensions):
# TODO: change this to foreign key once DBs are merged
owner = dbmodels.CharField(max_length=80)
name = dbmodels.CharField(max_length=100)
url_token = dbmodels.TextField()
class Meta:
db_table = 'tko_saved_queries'
class EmbeddedGraphingQuery(dbmodels.Model, model_logic.ModelExtensions):
url_token = dbmodels.TextField(null=False, blank=False)
graph_type = dbmodels.CharField(max_length=16, null=False, blank=False)
params = dbmodels.TextField(null=False, blank=False)
last_updated = dbmodels.DateTimeField(null=False, blank=False,
editable=False)
# refresh_time shows the time at which a thread is updating the cached
# image, or NULL if no one is updating the image. This is used so that only
# one thread is updating the cached image at a time (see
# graphing_utils.handle_plot_request)
refresh_time = dbmodels.DateTimeField(editable=False)
cached_png = dbmodels.TextField(editable=False)
class Meta:
db_table = 'tko_embedded_graphing_queries'
# views
class TestViewManager(TempManager):
def get_query_set(self):
query = super(TestViewManager, self).get_query_set()
# add extra fields to selects, using the SQL itself as the "alias"
extra_select = dict((sql, sql)
for sql in self.model.extra_fields.iterkeys())
return query.extra(select=extra_select)
def _get_include_exclude_suffix(self, exclude):
if exclude:
return '_exclude'
return '_include'
def _add_attribute_join(self, query_set, join_condition,
suffix=None, exclude=False):
if suffix is None:
suffix = self._get_include_exclude_suffix(exclude)
return self.add_join(query_set, 'tko_test_attributes',
join_key='test_idx',
join_condition=join_condition,
suffix=suffix, exclude=exclude)
def _add_label_pivot_table_join(self, query_set, suffix, join_condition='',
exclude=False, force_left_join=False):
return self.add_join(query_set, 'tko_test_labels_tests',
join_key='test_id',
join_condition=join_condition,
suffix=suffix, exclude=exclude,
force_left_join=force_left_join)
def _add_label_joins(self, query_set, suffix=''):
query_set = self._add_label_pivot_table_join(
query_set, suffix=suffix, force_left_join=True)
# since we're not joining from the original table, we can't use
# self.add_join() again
second_join_alias = 'tko_test_labels' + suffix
second_join_condition = ('%s.id = %s.testlabel_id' %
(second_join_alias,
'tko_test_labels_tests' + suffix))
query_set.query.add_custom_join('tko_test_labels',
second_join_condition,
query_set.query.LOUTER,
alias=second_join_alias)
return query_set
def _get_label_ids_from_names(self, label_names):
label_ids = list( # listifying avoids a double query below
TestLabel.objects.filter(name__in=label_names)
.values_list('name', 'id'))
if len(label_ids) < len(set(label_names)):
raise ValueError('Not all labels found: %s' %
', '.join(label_names))
return dict(name_and_id for name_and_id in label_ids)
def _include_or_exclude_labels(self, query_set, label_names, exclude=False):
label_ids = self._get_label_ids_from_names(label_names).itervalues()
suffix = self._get_include_exclude_suffix(exclude)
condition = ('tko_test_labels_tests%s.testlabel_id IN (%s)' %
(suffix,
','.join(str(label_id) for label_id in label_ids)))
return self._add_label_pivot_table_join(query_set,
join_condition=condition,
suffix=suffix,
exclude=exclude)
def _add_custom_select(self, query_set, select_name, select_sql):
return query_set.extra(select={select_name: select_sql})
def _add_select_value(self, query_set, alias):
return self._add_custom_select(query_set, alias,
_quote_name(alias) + '.value')
def _add_select_ifnull(self, query_set, alias, non_null_value):
select_sql = "IF(%s.id IS NOT NULL, '%s', NULL)" % (_quote_name(alias),
non_null_value)
return self._add_custom_select(query_set, alias, select_sql)
def _join_test_label_column(self, query_set, label_name, label_id):
alias = 'test_label_' + label_name
label_query = TestLabel.objects.filter(name=label_name)
query_set = Test.objects.join_custom_field(query_set, label_query,
alias)
query_set = self._add_select_ifnull(query_set, alias, label_name)
return query_set
def _join_test_label_columns(self, query_set, label_names):
label_id_map = self._get_label_ids_from_names(label_names)
for label_name in label_names:
query_set = self._join_test_label_column(query_set, label_name,
label_id_map[label_name])
return query_set
def _join_test_attribute(self, query_set, attribute, alias=None,
extra_join_condition=None):
"""
Join the given TestView QuerySet to TestAttribute. The resulting query
has an additional column for the given attribute named
"attribute_<attribute name>".
"""
if not alias:
alias = 'test_attribute_' + attribute
attribute_query = TestAttribute.objects.filter(attribute=attribute)
if extra_join_condition:
attribute_query = attribute_query.extra(
where=[extra_join_condition])
query_set = Test.objects.join_custom_field(query_set, attribute_query,
alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def _join_machine_label_columns(self, query_set, machine_label_names):
for label_name in machine_label_names:
alias = 'machine_label_' + label_name
condition = "FIND_IN_SET('%s', %s)" % (
label_name, _quote_name(alias) + '.value')
query_set = self._join_test_attribute(
query_set, 'host-labels',
alias=alias, extra_join_condition=condition)
query_set = self._add_select_ifnull(query_set, alias, label_name)
return query_set
def _join_one_iteration_key(self, query_set, result_key, first_alias=None):
alias = 'iteration_result_' + result_key
iteration_query = IterationResult.objects.filter(attribute=result_key)
if first_alias:
# after the first join, we need to match up iteration indices,
# otherwise each join will expand the query by the number of
# iterations and we'll have extraneous rows
iteration_query = iteration_query.extra(
where=['%s.iteration = %s.iteration'
% (_quote_name(alias), _quote_name(first_alias))])
query_set = Test.objects.join_custom_field(query_set, iteration_query,
alias, left_join=False)
# select the iteration value and index for this join
query_set = self._add_select_value(query_set, alias)
if not first_alias:
# for first join, add iteration index select too
query_set = self._add_custom_select(
query_set, 'iteration_index',
_quote_name(alias) + '.iteration')
return query_set, alias
def _join_iteration_results(self, test_view_query_set, result_keys):
"""Join the given TestView QuerySet to IterationResult for one result.
The resulting query looks like a TestView query but has one row per
iteration. Each row includes all the attributes of TestView, an
attribute for each key in result_keys and an iteration_index attribute.
We accomplish this by joining the TestView query to IterationResult
once per result key. Each join is restricted on the result key (and on
the test index, like all one-to-many joins). For the first join, this
is the only restriction, so each TestView row expands to a row per
iteration (per iteration that includes the key, of course). For each
subsequent join, we also restrict the iteration index to match that of
the initial join. This makes each subsequent join produce exactly one
result row for each input row. (This assumes each iteration contains
the same set of keys. Results are undefined if that's not true.)
"""
if not result_keys:
return test_view_query_set
query_set, first_alias = self._join_one_iteration_key(
test_view_query_set, result_keys[0])
for result_key in result_keys[1:]:
query_set, _ = self._join_one_iteration_key(query_set, result_key,
first_alias=first_alias)
return query_set
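    # Sketch of the intended use (result key names are hypothetical):
    #   query_set = self._join_iteration_results(test_views, ['throughput', 'latency'])
    # yields one row per iteration, with iteration_result_throughput,
    # iteration_result_latency and iteration_index selected alongside the
    # TestView columns.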
def _join_job_keyvals(self, query_set, job_keyvals):
for job_keyval in job_keyvals:
alias = 'job_keyval_' + job_keyval
keyval_query = JobKeyval.objects.filter(key=job_keyval)
query_set = Job.objects.join_custom_field(query_set, keyval_query,
alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def _join_iteration_attributes(self, query_set, iteration_attributes):
for attribute in iteration_attributes:
alias = 'iteration_attribute_' + attribute
attribute_query = IterationAttribute.objects.filter(
attribute=attribute)
query_set = Test.objects.join_custom_field(query_set,
attribute_query, alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def get_query_set_with_joins(self, filter_data):
"""
Add joins for querying over test-related items.
These parameters are supported going forward:
* test_attribute_fields: list of attribute names. Each attribute will
be available as a column attribute_<name>.value.
* test_label_fields: list of label names. Each label will be available
as a column label_<name>.id, non-null iff the label is present.
* iteration_result_fields: list of iteration result names. Each
result will be available as a column iteration_<name>.value.
Note that this changes the semantics to return iterations
instead of tests -- if a test has multiple iterations, a row
will be returned for each one. The iteration index is also
available as iteration_<name>.iteration.
* machine_label_fields: list of machine label names. Each will be
available as a column machine_label_<name>.id, non-null iff the
label is present on the machine used in the test.
* job_keyval_fields: list of job keyval names. Each value will be
available as a column job_keyval_<name>.id, non-null iff the
keyval is present in the AFE job.
* iteration_attribute_fields: list of iteration attribute names. Each
attribute will be available as a column
iteration_attribute<name>.id, non-null iff the attribute is
present.
These parameters are deprecated:
* include_labels
* exclude_labels
* include_attributes_where
* exclude_attributes_where
Additionally, this method adds joins if the following strings are
present in extra_where (this is also deprecated):
* test_labels
* test_attributes_host_labels
"""
query_set = self.get_query_set()
test_attributes = filter_data.pop('test_attribute_fields', [])
for attribute in test_attributes:
query_set = self._join_test_attribute(query_set, attribute)
test_labels = filter_data.pop('test_label_fields', [])
query_set = self._join_test_label_columns(query_set, test_labels)
machine_labels = filter_data.pop('machine_label_fields', [])
query_set = self._join_machine_label_columns(query_set, machine_labels)
iteration_keys = filter_data.pop('iteration_result_fields', [])
query_set = self._join_iteration_results(query_set, iteration_keys)
job_keyvals = filter_data.pop('job_keyval_fields', [])
query_set = self._join_job_keyvals(query_set, job_keyvals)
iteration_attributes = filter_data.pop('iteration_attribute_fields', [])
query_set = self._join_iteration_attributes(query_set,
iteration_attributes)
# everything that follows is deprecated behavior
joined = False
extra_where = filter_data.get('extra_where', '')
if 'tko_test_labels' in extra_where:
query_set = self._add_label_joins(query_set)
joined = True
include_labels = filter_data.pop('include_labels', [])
exclude_labels = filter_data.pop('exclude_labels', [])
if include_labels:
query_set = self._include_or_exclude_labels(query_set,
include_labels)
joined = True
if exclude_labels:
query_set = self._include_or_exclude_labels(query_set,
exclude_labels,
exclude=True)
joined = True
include_attributes_where = filter_data.pop('include_attributes_where',
'')
exclude_attributes_where = filter_data.pop('exclude_attributes_where',
'')
if include_attributes_where:
query_set = self._add_attribute_join(
query_set,
join_condition=self.escape_user_sql(include_attributes_where))
joined = True
if exclude_attributes_where:
query_set = self._add_attribute_join(
query_set,
join_condition=self.escape_user_sql(exclude_attributes_where),
exclude=True)
joined = True
if not joined:
filter_data['no_distinct'] = True
if 'tko_test_attributes_host_labels' in extra_where:
query_set = self._add_attribute_join(
query_set, suffix='_host_labels',
join_condition='tko_test_attributes_host_labels.attribute = '
'"host-labels"')
return query_set
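    # Hedged example of filter_data for the going-forward parameters (the
    # attribute/label names below are invented for illustration):
    #   filter_data = {'test_attribute_fields': ['host-labels'],
    #                  'test_label_fields': ['regression'],
    #                  'iteration_result_fields': ['throughput']}
    #   query_set = TestView.objects.get_query_set_with_joins(filter_data)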
def query_test_ids(self, filter_data, apply_presentation=True):
query = self.model.query_objects(filter_data,
apply_presentation=apply_presentation)
dicts = query.values('test_idx')
return [item['test_idx'] for item in dicts]
def query_test_label_ids(self, filter_data):
query_set = self.model.query_objects(filter_data)
query_set = self._add_label_joins(query_set, suffix='_list')
rows = self._custom_select_query(query_set, ['tko_test_labels_list.id'])
return [row[0] for row in rows if row[0] is not None]
def escape_user_sql(self, sql):
sql = super(TestViewManager, self).escape_user_sql(sql)
return sql.replace('test_idx', self.get_key_on_this_table('test_idx'))
class TestView(dbmodels.Model, model_logic.ModelExtensions):
extra_fields = {
'DATE(job_queued_time)': 'job queued day',
'DATE(test_finished_time)': 'test finished day',
}
group_fields = [
'test_name',
'status',
'kernel',
'hostname',
'job_tag',
'job_name',
'platform',
'reason',
'job_owner',
'job_queued_time',
'DATE(job_queued_time)',
'test_started_time',
'test_finished_time',
'DATE(test_finished_time)',
]
test_idx = dbmodels.IntegerField('test index', primary_key=True)
job_idx = dbmodels.IntegerField('job index', null=True, blank=True)
test_name = dbmodels.CharField(blank=True, max_length=90)
subdir = dbmodels.CharField('subdirectory', blank=True, max_length=180)
kernel_idx = dbmodels.IntegerField('kernel index')
status_idx = dbmodels.IntegerField('status index')
reason = dbmodels.CharField(blank=True, max_length=3072)
machine_idx = dbmodels.IntegerField('host index')
test_started_time = dbmodels.DateTimeField(null=True, blank=True)
test_finished_time = dbmodels.DateTimeField(null=True, blank=True)
job_tag = dbmodels.CharField(blank=True, max_length=300)
job_name = dbmodels.CharField(blank=True, max_length=300)
job_owner = dbmodels.CharField('owner', blank=True, max_length=240)
job_queued_time = dbmodels.DateTimeField(null=True, blank=True)
job_started_time = dbmodels.DateTimeField(null=True, blank=True)
job_finished_time = dbmodels.DateTimeField(null=True, blank=True)
afe_job_id = dbmodels.IntegerField(null=True)
hostname = dbmodels.CharField(blank=True, max_length=300)
platform = dbmodels.CharField(blank=True, max_length=240)
machine_owner = dbmodels.CharField(blank=True, max_length=240)
kernel_hash = dbmodels.CharField(blank=True, max_length=105)
kernel_base = dbmodels.CharField(blank=True, max_length=90)
kernel = dbmodels.CharField(blank=True, max_length=300)
status = dbmodels.CharField(blank=True, max_length=30)
objects = TestViewManager()
def save(self):
raise NotImplementedError('TestView is read-only')
def delete(self):
raise NotImplementedError('TestView is read-only')
@classmethod
def query_objects(cls, filter_data, initial_query=None,
apply_presentation=True):
if initial_query is None:
initial_query = cls.objects.get_query_set_with_joins(filter_data)
return super(TestView, cls).query_objects(
filter_data, initial_query=initial_query,
apply_presentation=apply_presentation)
class Meta:
db_table = 'tko_test_view_2'
managed = False
|
yangdongsheng/autotest
|
frontend/tko/models.py
|
Python
|
gpl-2.0
| 30,170 | 0.001624 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Channel for Ecuador TV
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import os
import sys
import urlparse,re
import urllib
import datetime
from core import logger
from core import scrapertools
from core.item import Item
import youtube_channel
__channel__ = "ecuadortv"
DEBUG = True
YOUTUBE_CHANNEL_ID = "RTVEcuador"
def isGeneric():
return True
def mainlist(item):
logger.info("tvalacarta.channels.ecuadortv mainlist")
return youtube_channel.playlists(item,YOUTUBE_CHANNEL_ID)
|
tvalacarta/tvalacarta
|
python/main-classic/channels/ecuadortv.py
|
Python
|
gpl-3.0
| 680 | 0.010294 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging, os
logging.basicConfig(level=logging.INFO)
from deepy.networks import RecursiveAutoEncoder
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from util import get_data, VECTOR_SIZE
model_path = os.path.join(os.path.dirname(__file__), "models", "rae1.gz")
if __name__ == '__main__':
model = RecursiveAutoEncoder(input_dim=VECTOR_SIZE, rep_dim=10)
trainer = SGDTrainer(model)
annealer = LearningRateAnnealer()
trainer.run(get_data(), epoch_controllers=[annealer])
model.save_params(model_path)
|
zomux/deepy
|
examples/auto_encoders/recursive_auto_encoder.py
|
Python
|
mit
| 593 | 0.008432 |
def migrate(cr, version):
if not version:
return
# Replace ids of better_zip by ids of city_zip
cr.execute("""
ALTER TABLE crm_event_compassion
DROP CONSTRAINT crm_event_compassion_zip_id_fkey;
UPDATE crm_event_compassion e
SET zip_id = (
SELECT id FROM res_city_zip
WHERE openupgrade_legacy_12_0_better_zip_id = e.zip_id)
""")
|
CompassionCH/compassion-modules
|
crm_compassion/migrations/12.0.1.0.0/pre-migration.py
|
Python
|
agpl-3.0
| 409 | 0 |
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import http_client
from cyborg.api.controllers.v1.deployables import Deployable
from cyborg.tests.unit.api.controllers.v1 import base as v1_test
from cyborg.tests.unit import fake_deployable
from cyborg.agent.rpcapi import AgentAPI
class TestFPGAProgramController(v1_test.APITestV1):
def setUp(self):
super(TestFPGAProgramController, self).setUp()
self.headers = self.gen_headers(self.context)
self.deployable_uuids = ['0acbf8d6-e02a-4394-aae3-57557d209498']
@mock.patch('cyborg.objects.Deployable.get')
@mock.patch('cyborg.agent.rpcapi.AgentAPI.program_fpga_with_bitstream')
def test_program(self, mock_program, mock_get_dep):
self.headers['X-Roles'] = 'admin'
self.headers['Content-Type'] = 'application/json'
dep_uuid = self.deployable_uuids[0]
fake_dep = fake_deployable.fake_deployable_obj(self.context,
uuid=dep_uuid)
mock_get_dep.return_value = fake_dep
mock_program.return_value = None
body = [{"image_uuid": "9a17439a-85d0-4c53-a3d3-0f68a2eac896"}]
response = self.\
patch_json('/accelerators/deployables/%s/program' % dep_uuid,
[{'path': '/program', 'value': body,
'op': 'replace'}],
headers=self.headers)
self.assertEqual(http_client.OK, response.status_code)
data = response.json_body
self.assertEqual(dep_uuid, data['uuid'])
|
openstack/nomad
|
cyborg/tests/unit/api/controllers/v1/test_fpga_program.py
|
Python
|
apache-2.0
| 2,179 | 0 |
'''
Created on Oct 11, 2014
@author: mshepher
'''
from globals import Globals
class Order(object):
EIGENAAR = 0
DIER = 1
GESLACHT = 2
GECASTREERD = 3
AKTIEF = 4
OVERGANGS = 5
GEWICHT = 6 #numerical
PAKKETKG = 7 #float
SOORT = 8
PUP = 9
RAS = 10
def __init__(self,order):
'''order = line from csv file, unparsed'''
rec = order.strip().split(',')
self.base = rec[:self.RAS+1]
self.owner, self.animal = rec[:self.GESLACHT]
self.weight = float(rec[self.GEWICHT])
self.package = float(rec[self.PAKKETKG])
self.kind = rec[self.SOORT].upper()
self.ras = rec[self.RAS].upper()
rest = rec[self.RAS+1:]
if '|' in rest:
splitter = rest.index('|')
self.donts = [i.upper() for i in rest[:splitter]]
self.prefers = [i.upper() for i in rest[splitter+1:]]
else:
self.donts = [i.upper() for i in rest]
self.prefers = []
self.factor = 1.0
self.result = None
self.portie = 'beide'
def get_prefers(self):
return self.prefers
def set_prefers(self, value):
self.prefers = value
def get_base(self):
return ','.join(self.base)
def is_allergic(self,stuff):
'''true if animal is allergic to stuff'''
return stuff in self.donts
def get_donts(self):
return self.donts
def set_donts(self, donts):
self.donts = donts
def get_days(self):
return round(self.package / (self.weight * Globals.FACTOR[self.ras]))
def get_meal_size(self):
return self.weight * self.factor * Globals.FACTOR[self.ras] / 2
def get_weight(self):
return self.weight
def get_package(self):
return self.package
def get_kind(self):
return self.kind
def get_owner(self):
return self.owner
def get_animal(self):
return self.animal
def get_ras(self):
return self.ras
def set_result(self, result):
self.result = result
def get_result(self):
return self.result
def get_factor(self):
return self.factor
def set_factor(self, factor):
self.factor = factor
def get_portie(self):
return self.portie
def set_portie(self, portie):
self.portie = portie
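# Illustrative sanity check (values below are hypothetical, not from the project):
# assuming Globals.FACTOR['LABRADOR'] == 0.02, a 10 kg package for a 25 kg dog lasts
# round(10 / (25 * 0.02)) == 20 days (get_days), and one meal weighs
# 25 * 1.0 * 0.02 / 2 == 0.25 kg (get_meal_size; the division by 2 suggests two meals per day).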
|
micha-shepher/oervoer-wizard
|
oervoer/order.py
|
Python
|
gpl-3.0
| 2,431 | 0.008638 |
from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
title = models.CharField(max_length=255)
body = models.TextField()
user = models.ForeignKey(User)
|
erkarl/browl-api
|
apps/posts/models.py
|
Python
|
mit
| 211 | 0 |
import argparse
import csv
from util.helper import *
import util.plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
import matplotlib.pyplot as plt  # plt is used throughout; make the dependency explicit
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f',
help="data file directory",
required=True,
action="store",
dest="file")
parser.add_argument('-o',
help="Output directory",
required=True,
action="store",
dest="dir")
args = parser.parse_args()
to_plot = []
cong = ['reno', 'cubic', 'vegas']
bursts = ['0.03', '0.05', '0.07', '0.09']
graphfiles = []
for burst in bursts:
for tcptype in cong:
data = read_list(args.file + '/' + tcptype + '-' + burst +'-raw_data.txt')
xs = col(0, data)
ys = col(1, data)
plt.plot(xs, ys, label=tcptype)
plt.title('Shrew-attack TCP throughput. Burst = ' + burst)
plt.legend(loc='upper left')
plt.xlabel('seconds')
plt.ylabel("% thoroughput")
plt.grid(True)
plt.savefig("{0}/{1}-result.png".format(args.dir, burst))
plt.close()
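# Example invocation (paths are made up for illustration):
#   python plotter.py -f results/shrew -o plots
# The script expects one input file per (congestion control, burst) pair, named like
# results/shrew/reno-0.03-raw_data.txt, with (time, throughput fraction) rows readable
# by util.helper.read_list, and writes one plots/<burst>-result.png per burst value.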
|
XianliangJ/collections
|
ShrewAttack/plotter.py
|
Python
|
gpl-3.0
| 1,097 | 0.012762 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
import unittest
try:
from unittest import mock
except ImportError:
import mock
import virttest
from virttest.utils_zchannels import ChannelPaths, SubchannelPaths
OUT_OK = ["Device Subchan. DevType CU Type Use PIM PAM POM CHPIDs ",
"----------------------------------------------------------------------",
"0.0.0600 0.0.0000 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0601 0.0.0001 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0602 0.0.0002 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.540c 0.0.24ac 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"0.0.540d 0.0.24ad 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"none 0.0.26aa f0 f0 ff 11122122 00000000",
"none 0.0.26ab f0 f0 ff 11122122 00000000",
"0.0.570c 0.0.27ac 3390/0c 3990/e9 yes f0 f0 ff 12212231 00000000"]
class TestSubchannelPaths(unittest.TestCase):
def test_get_info(self):
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(OUT_OK)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
self.assertEqual(8, len(subchannel_paths.devices))
def test_get_first_unused_and_safely_removable(self):
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(OUT_OK)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNotNone(device)
self.assertEqual("0.0.26aa", device[1])
def test_get_first_unused_and_safely_removable_not_safe(self):
not_safe = OUT_OK.copy()
not_safe[6] = not_safe[6].replace("01021112", "11122122")
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(not_safe)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNone(device)
def test_get_first_unused_and_safely_removable_not_first(self):
not_safe = OUT_OK.copy()
not_safe[7] = not_safe[7].replace("11122122", "01021112")
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(not_safe)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNotNone(device)
self.assertEqual("0.0.26ab", device[1])
class TestChannelPaths(unittest.TestCase):
def test__split(self):
chpids = "12345678"
ids = ChannelPaths._split(chpids)
self.assertEqual(4, len(ids))
self.assertEqual("0.12", ids[0])
self.assertEqual("0.78", ids[3])
if __name__ == '__main__':
unittest.main()
|
avocado-framework/avocado-vt
|
virttest/unittests/test_utils_zchannels.py
|
Python
|
gpl-2.0
| 3,733 | 0.00375 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class EclipseIntegrationTest(PantsRunIntegrationTest):
def _eclipse_test(self, specs, project_dir=os.path.join('.pants.d', 'tmp', 'test-eclipse'),
project_name='project'):
"""Helper method that tests eclipse generation on the input spec list."""
if not os.path.exists(project_dir):
os.makedirs(project_dir)
with temporary_dir(root_dir=project_dir) as path:
pants_run = self.run_pants(['goal', 'eclipse',] + specs
+ ['--no-pantsrc', '--eclipse-project-dir={dir}'.format(dir=path)])
self.assertEquals(pants_run.returncode, self.PANTS_SUCCESS_CODE,
"goal eclipse expected success, got {0}\n"
"got stderr:\n{1}\n"
"got stdout:\n{2}\n".format(pants_run.returncode,
pants_run.stderr_data,
pants_run.stdout_data))
expected_files = ('.classpath', '.project',)
workdir = os.path.join(path, project_name)
self.assertTrue(os.path.exists(workdir),
'Failed to find project_dir at {dir}.'.format(dir=workdir))
self.assertTrue(all(os.path.exists(os.path.join(workdir, name))
for name in expected_files))
# return contents of .classpath so we can verify it
with open(os.path.join(workdir, '.classpath')) as classpath_f:
classpath = classpath_f.read()
# should be at least one input; if not we may have the wrong target path
self.assertIn('<classpathentry kind="src"', classpath)
return classpath
# Test Eclipse generation on example targets; ideally should test that the build "works"
def test_eclipse_on_protobuf(self):
self._eclipse_test(['examples/src/java/com/pants/examples/protobuf::'])
def test_eclipse_on_jaxb(self):
self._eclipse_test(['examples/src/java/com/pants/examples/jaxb/main'])
def test_eclipse_on_unicode(self):
self._eclipse_test(['testprojects/src/java/com/pants/testproject/unicode::'])
def test_eclipse_on_hello(self):
self._eclipse_test(['examples/src/java/com/pants/examples/hello::'])
def test_eclipse_on_annotations(self):
self._eclipse_test(['examples/src/java/com/pants/examples/annotation::'])
def test_eclipse_on_all_examples(self):
self._eclipse_test(['examples/src/java/com/pants/examples::'])
def test_eclipse_on_java_sources(self):
classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/javasources::'])
self.assertIn('path="testprojects.src.java"', classpath)
def test_eclipse_on_thriftdeptest(self):
self._eclipse_test(['testprojects/src/java/com/pants/testproject/thriftdeptest::'])
def test_eclipse_on_scaladepsonboth(self):
classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/scaladepsonboth::'])
# Previously Java dependencies didn't get included
self.assertIn('path="testprojects.src.java"', classpath)
|
square/pants
|
tests/python/pants_test/tasks/test_eclipse_integration.py
|
Python
|
apache-2.0
| 3,401 | 0.010879 |
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
h1_start = re.compile(r"^\s*=(?P<title>[^=]+)=*[ \t]*")
valid_title = re.compile(r"[^=]+")
general_heading = re.compile(r"^\s*(={2,6}(?P<title>" + valid_title.pattern +
                             r")=*)\s*$", flags=re.MULTILINE)
invalid_symbols = re.compile(r"[^\w\-_\s]+")
def strip_accents(s):
return ''.join(
(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(
c) != 'Mn'))
REPLACEMENTS = {
ord('ä'): 'ae',
ord('ö'): 'oe',
ord('ü'): 'ue',
ord('ß'): 'ss',
ord('Ä'): 'Ae',
ord('Ö'): 'Oe',
ord('Ü'): 'Ue',
ord('ẞ'): 'SS'
}
def substitute_umlauts(s):
return s.translate(REPLACEMENTS)
def remove_unallowed_chars(s):
s = invalid_symbols.sub('', s)
return s
def remove_and_compress_whitespaces(s):
return '_'.join(s.split()).strip('_')
def turn_into_valid_short_title(title, short_title_set=(), max_length=20):
st = substitute_umlauts(title)
st = strip_accents(st)
st = remove_unallowed_chars(st)
st = remove_and_compress_whitespaces(st)
st = st.lstrip('1234567890-_')
st = st[:min(len(st), max_length)]
if not st:
st = 'sub'
if st not in short_title_set:
return st
else:
i = 0
while True:
i += 1
suffix = str(i)
new_st = st[:min(max_length - len(suffix), len(st))] + suffix
if new_st not in short_title_set:
return new_st
def get_heading_matcher(level=0):
if 0 < level < 7:
s = "%d" % level
elif level == 0:
s = "1, 6"
else:
raise ValueError(
"level must be between 1 and 6 or 0, but was %d." % level)
pattern = r"^\s*={%s}(?P<title>[^=§]+)" \
r"(?:§\s*(?P<short_title>[^=§\s][^=§]*))?=*\s*$"
return re.compile(pattern % s, flags=re.MULTILINE)
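# Doctest-style sketch of the normalisation pipeline (not part of the original file):
# >>> turn_into_valid_short_title('Straße zum Glück')
# 'Strasse_zum_Glueck'
# >>> turn_into_valid_short_title('Straße zum Glück', {'Strasse_zum_Glueck'})
# 'Strasse_zum_Glueck1'
# Umlauts are transliterated, accents and disallowed symbols stripped, whitespace
# collapsed to underscores, and name clashes resolved with a numeric suffix.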
|
Qwlouse/Findeco
|
node_storage/validation.py
|
Python
|
gpl-3.0
| 1,992 | 0.001011 |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
class GIF_IMAGE(Structure):
type_name = 'GIF_IMAGE';
def __init__(self, stream, offset, max_size, parent, name):
import C;
from GIF_BLOCK import GIF_BLOCK;
from GIF_COLORTABLE import GIF_COLORTABLE;
from GIF_IMAGE_DESCRIPTOR import GIF_IMAGE_DESCRIPTOR;
from LZW_compressed_data import LZW_compressed_data;
Structure.__init__(self, stream, offset, max_size, parent, name);
self._descriptor = self.Member(GIF_IMAGE_DESCRIPTOR, 'descriptor');
flags = self._descriptor._Flags;
self._has_local_color_table = flags._LocalColorTable.value == 1;
if self._has_local_color_table:
self._local_color_table_entries = \
2 ** (flags._SizeLocalColorTable.value + 1);
self._local_color_table_sorted = flags._Sort.value == 1;
self._local_color_table = self.Member(GIF_COLORTABLE, \
'local_color_table', self._local_color_table_entries, \
self._local_color_table_sorted);
else:
self._local_color_table = None;
self._lzw_minimum_code_size = self.Member(C.BYTE, 'LZW_minimum_code_size');
if self._lzw_minimum_code_size.value == 0:
self._lzw_minimum_code_size.warnings.append('expected value > 0');
self._compressed_pixel_data_container = self.Member(GIF_BLOCK, 'pixel_data');
self._pixel_data_container = \
self._compressed_pixel_data_container.ContainMember( \
LZW_compressed_data, 'pixel_data', \
self._lzw_minimum_code_size.value);
self._pixel_data = self._pixel_data_container.ContainMember( \
C.STRING, 'pixel_data', \
self._descriptor._Width.value * self._descriptor._Height.value);
|
SkyLined/headsup
|
decode/GIF_IMAGE.py
|
Python
|
apache-2.0
| 2,315 | 0.019438 |
import psycopg2
from db.enums import *
base = psycopg2.connect("dbname='cardkeepersample' user='andrew' host='localhost' password='1234'")
cursor = base.cursor()
# Wrapped queries in alphabetic order
def active_packs(user_id, start=0, count=10):
query = """SELECT packs.pack_id, packs.name FROM user_packs, packs WHERE packs.pack_id = user_packs.pack_id
AND user_packs.status = %s AND user_id = %s ORDER BY pack_id
OFFSET %s LIMIT %s;"""
cursor.execute(query, (CardStatusType.ACTIVE.value, user_id, start, count))
return cursor.fetchall()
def add_pack(user_id, pack_id):
query = """INSERT INTO user_packs (pack_id, user_id, status) VALUES (%s, %s, 'Active');"""
cursor.execute(query, (pack_id, user_id))
query = """SELECT card_id FROM cards WHERE cards.pack_id = %s"""
cursor.execute(query, (pack_id,))
cards = cursor.fetchall()
for i in cards:
query = """INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status) VALUES (%s, %s, 0, 0, 'Active');"""
cursor.execute(query, (user_id, i[0]))
base.commit()
def add_user(user):
query = """INSERT INTO users (user_id, name, general_goal, weekly_goal, notifications_learn, notifications_stats, joined)
VALUES (%s, %s, %s, %s, %s, %s, current_date);"""
cursor.execute(query, tuple(user))
base.commit()
def available_packs(user_id):
query = """SELECT packs.pack_id, packs.name FROM packs
WHERE packs.privacy = 'public' LIMIT 105;"""
cursor.execute(query)
return cursor.fetchall()
def available_groups(user_id, rights=RightsType.USER, include_higher=False):
query = """SELECT groups.group_id, groups.name FROM groups, user_groups
WHERE groups.group_id = user_groups.group_id
AND user_groups.user_id = %s
AND user_groups.rights """ + ("<" if include_higher else "") + "= %s;"""
    if isinstance(rights, RightsType):
        rights = rights.value
    cursor.execute(query, (user_id, rights))
return cursor.fetchall()
def delete_pack(pack_id):
owner_id = get_pack(pack_id)['owner_id']
cursor.execute('''
DELETE FROM user_cards
USING cards
WHERE
user_cards.card_id = cards.card_id AND
cards.pack_id = %s;
''', (pack_id,))
cursor.execute(
'DELETE FROM cards WHERE pack_id = %s;',
(pack_id,)
)
cursor.execute(
'DELETE FROM user_packs WHERE pack_id = %s;',
(pack_id,)
)
cursor.execute(
'DELETE FROM packs WHERE pack_id = %s;',
(pack_id,)
)
base.commit()
def get_all_cards_in_pack(pack_id):
cursor.execute('''
SELECT card_id, front, back, comment, type
FROM cards
WHERE pack_id = %s;
''', (pack_id,))
return [{'card_id': card_id, 'front': front, 'back': back,
'comment': comment, 'type': tp}
for card_id, front, back, comment, tp
in cursor.fetchall()]
def get_pack(pack_id, user_id=None):
cursor.execute(
'SELECT name, owner_id, privacy FROM packs WHERE pack_id = %s;',
(pack_id,)
)
name, owner_id, privacy = cursor.fetchone()
status = None
if user_id is not None:
cursor.execute('''
SELECT status FROM user_packs
WHERE user_id = %s AND pack_id = %s;
''', (user_id, pack_id))
status = cursor.fetchone()[0]
return {
'pack_id': pack_id,
'name': name,
'owner_id': owner_id,
'privacy': privacy,
'status': status
}
def if_added(user_id, pack_id):
query = "SELECT * FROM user_packs WHERE user_id = %s AND pack_id = %s;"
cursor.execute(query, (user_id, pack_id))
return list(cursor.fetchall())
# TODO: Take permissions lists into account
def has_pack_read_access(pack_id, user_id):
pack_info = get_pack(pack_id)
return user_id == pack_info['owner_id'] or pack_info['privacy'] == 'public'
def if_registered(user_id):
query = "SELECT * FROM users WHERE users.user_id = %s;"
cursor.execute(query, (user_id,))
return True if len(cursor.fetchall()) else False
def cards_for_learning(user_id):
query = """SELECT cards.front, cards.back, cards.comment FROM user_cards, cards
WHERE user_cards.card_id = cards.card_id AND
user_id = %s AND cards.type = %s"""
    cursor.execute(query, (user_id, CardType.SHORT.value))
return cursor.fetchall()
def new_card(front, back):
query = "INSERT INTO cards (front, back) VALUES (%s, %s);"
cursor.execute(query, (front, back))
base.commit()
def new_group(name, owner, privacy="public"):
query = "INSERT INTO groups (name, privacy, owner_id) VALUES (%s, %s, %s);"
cursor.execute(query, (name, privacy, owner))
base.commit()
def new_pack(name, owner, privacy=PrivacyType.PUBLIC, status=CardStatusType.ACTIVE, cards=[]):
if isinstance(privacy, PrivacyType):
privacy = privacy.value
if isinstance(status, CardStatusType):
status = status.value
query = "INSERT INTO packs (name, owner_id, privacy) VALUES (%s, %s, %s);"
cursor.execute(query, (name, owner, privacy))
query = "SELECT pack_id FROM packs WHERE name = %s AND owner_id = %s;"
cursor.execute(query, (name, owner))
pack_id = cursor.fetchone()[0]
query = "INSERT INTO user_packs (user_id, pack_id, status) VALUES (%s, %s, %s);"
cursor.execute(query, (owner, pack_id, status))
insert_query = "INSERT INTO cards (pack_id, front, back, comment, type) VALUES (%s, %s, %s, %s, %s) RETURNING card_id;"
insert2_query = "INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status)" \
"VALUES (%s, %s, 0, 0, 'Active');"
for card in cards:
front = card['front']
back = card['back']
comment = card['comment']
cursor.execute(insert_query, (pack_id, front, back, comment, CardType.SHORT.value))
card_id = cursor.fetchone()[0]
cursor.execute(insert2_query, (owner, card_id))
base.commit()
return pack_id
def select_cards(user_id, pack_id):
print(user_id, pack_id)
query = """SELECT cards.card_id, cards.front, cards.back, cards.comment
FROM cards, user_cards
WHERE cards.card_id = user_cards.card_id
AND user_cards.status = %s
AND cards.pack_id = %s
AND user_cards.user_id = %s"""
cursor.execute(query, (CardStatusType.ACTIVE.value, pack_id, user_id))
return cursor.fetchall()
def update_card_data(user_id, card_id, answer):
query = """UPDATE user_cards SET times_reviewed = times_reviewed+1, correct_answers = correct_answers+%s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (answer, user_id, card_id))
base.commit()
def update_card_status(user_id, card_id, status):
query = """UPDATE user_cards SET status = %s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (status, user_id, card_id))
base.commit()
def update_pack_name(pack_id, new_name):
query = 'UPDATE packs SET name = %s WHERE pack_id = %s;'
cursor.execute(query, (new_name, pack_id))
base.commit()
def update_pack_privacy(pack_id, new_privacy):
if isinstance(new_privacy, PrivacyType):
new_privacy = new_privacy.value
query = 'UPDATE packs SET privacy = %s WHERE pack_id = %s;'
cursor.execute(query, (new_privacy, pack_id))
base.commit()
def update_pack_status(user_id, pack_id, status):
query = """UPDATE user_cards SET status = %s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (status, user_id, pack_id))
base.commit()
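# Illustrative call sequence (assumes a reachable PostgreSQL instance with the schema
# implied above; ids and strings are invented for the example):
# pack_id = new_pack('Spanish basics', owner=42,
#                    cards=[{'front': 'hola', 'back': 'hello', 'comment': ''}])
# add_pack(user_id=43, pack_id=pack_id)
# for card_id, front, back, comment in select_cards(43, pack_id):
#     update_card_data(43, card_id, answer=1)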
|
andrewgolman/Learning_Cards
|
bot/db/queries.py
|
Python
|
gpl-3.0
| 7,726 | 0.001941 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'machine.domain'
db.add_column('inventory_machine', 'domain',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'machine.uuid'
db.add_column('inventory_machine', 'uuid',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'machine.language'
db.add_column('inventory_machine', 'language',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'machine.domain'
db.delete_column('inventory_machine', 'domain')
# Deleting field 'machine.uuid'
db.delete_column('inventory_machine', 'uuid')
# Deleting field 'machine.language'
db.delete_column('inventory_machine', 'language')
models = {
'deploy.package': {
'Meta': {'object_name': 'package'},
'command': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.packagecondition']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignoreperiod': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packagesum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'})
},
'deploy.packagecondition': {
'Meta': {'object_name': 'packagecondition'},
'depends': ('django.db.models.fields.CharField', [], {'default': "'installed'", 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'softwarename': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'softwareversion': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'deploy.packageprofile': {
'Meta': {'object_name': 'packageprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"})
},
'deploy.timeprofile': {
'Meta': {'object_name': 'timeprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
'inventory.entity': {
'Meta': {'object_name': 'entity'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'force_packageprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'force_timeprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'old_packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_packageprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"}),
'old_timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_timeprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.timeprofile']"}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['inventory.entity']"}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'inventory.machine': {
'Meta': {'object_name': 'machine'},
'domain': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.entity']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'lastsave': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'netsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'ossum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'softsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'typemachine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.typemachine']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'inventory.net': {
'Meta': {'object_name': 'net'},
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machine']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'mask': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'inventory.osdistribution': {
'Meta': {'object_name': 'osdistribution'},
'arch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machine']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'systemdrive': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'inventory.software': {
'Meta': {'object_name': 'software'},
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machine']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uninstall': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'inventory.typemachine': {
'Meta': {'object_name': 'typemachine'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['inventory']
|
updatengine/updatengine-server
|
inventory/migrations/0013_auto__add_field_machine_domain__add_field_machine_uuid__add_field_mach.py
|
Python
|
gpl-2.0
| 11,272 | 0.006831 |
from django.contrib import admin
from .models import BackgroundImages, Widget
class WidgetAdmin(admin.ModelAdmin):
list_display = ('name', 'link', 'is_featured')
ordering = ('-id',)
class BackgroundAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at')
ordering = ('-id',)
admin.site.register(Widget, WidgetAdmin)
admin.site.register(BackgroundImages, BackgroundAdmin)
|
malikshahzad228/widget-jack
|
widgets/admin.py
|
Python
|
mit
| 400 | 0 |
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commandline arguments related to error handling
"""
from __future__ import print_function, division, absolute_import
def add_error_args(arg_parser):
error_group = arg_parser.add_argument_group(
title="Errors",
description="Options for error handling")
error_group.add_argument(
"--skip-variant-errors",
default=False,
action="store_true",
help="Skip variants which cause runtime errors of any kind")
return error_group
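# Minimal usage sketch (illustrative only; the real CLI composes this with other groups):
# import argparse
# parser = argparse.ArgumentParser()
# add_error_args(parser)
# args = parser.parse_args(['--skip-variant-errors'])
# assert args.skip_variant_errors is True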
|
hammerlab/topiary
|
topiary/cli/errors.py
|
Python
|
apache-2.0
| 1,087 | 0.00092 |