repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
piyushroshan/tuxofwar2012
|
questiondb.py
|
1
|
1400
|
# Model Defining Questions Database
import string
from google.appengine.ext import db
class questionm(db.Model):
questionNumber = db.IntegerProperty(required=True)
question = db.StringProperty(required=True, multiline=True)
qimage = db.StringProperty()
opt1 = db.StringProperty(required=True, multiline=True)
opt2 = db.StringProperty(required=True, multiline=True)
opt3 = db.StringProperty(required=True, multiline=True)
opt4 = db.StringProperty(required=True, multiline=True)
ans = db.StringProperty(required=True)
def getQuestion(num,var):
query = questionm.all()
q = query.filter('questionNumber =',num).get()
if q:
return ("{"+
"\"num\" : " + "\""+ str(var) +"\""+","+
"\"question\" : "+"\""+q.question.replace('\r\n','<br />')+"\""+","+
"\"image\" : "+"\""+q.qimage+"\""+","+
"\"options\" : " + "["+
"\""+q.opt1.replace('\r\n','<br />')+"\""+","+
"\""+q.opt2.replace('\r\n','<br />')+"\""+","+
"\""+q.opt3.replace('\r\n','<br />')+"\""+","+
"\""+q.opt4.replace('\r\n','<br />')+"\""+
"]"+
"}")
else:
return ("{"+
"\"num\" : " + "\""+"\""+","+
"\"question\" : "+"\""+"Sorry question not found. We'll fix it Soon"+"\""+","+
"\"image\" : "+"\""+"\""+","+
"\"options\" : " + "["+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+
"]"+
"}")
|
gpl-2.0
|
fedora-infra/anitya
|
anitya/lib/versions/base.py
|
2
|
7364
|
# -*- coding: utf-8 -*-
#
# Copyright © 2017-2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
"""The Anitya versions API."""
from __future__ import unicode_literals
import functools
import re
from typing import Optional
from datetime import datetime
from anitya.lib.exceptions import InvalidVersion
#: A regular expression to determine if the version string contains a 'v' prefix.
v_prefix = re.compile(r"v\d.*")
@functools.total_ordering
class Version(object):
"""The base class for versions."""
name = "Generic Version"
def __init__(
self,
version: Optional[str] = None,
prefix: Optional[str] = None,
created_on: Optional[datetime] = None,
pattern: Optional[str] = None,
cursor: Optional[str] = None,
commit_url: Optional[str] = None,
pre_release_filter: Optional[str] = None,
):
"""
Constructor of Version class.
Params:
version: Raw version
prefix: Prefix to remove
created_on: Date of creation
pattern: Calendar version pattern.
See `Calendar version scheme_` for more information.
cursor: An opaque, backend-specific cursor pointing to the version.
commit_url: A URL pointing to the commit tagged as the version.
pre_release_filter: A filter used to identify pre-release versions
"""
self.version = version
if prefix:
self.prefixes = prefix.split(";")
# Sort from shortest to longest; this prevents a shorter prefix
# from being stripped when a longer one also matches.
# For example:
# version = release_db-1.2.3
# prefixes = release_db-;release
# would return db-1.2.3 instead of 1.2.3 if the sort is not done
self.prefixes.sort(key=len)
else:
self.prefixes = []
self.created_on = created_on
if pattern:
self.pattern = pattern.upper()
else:
self.pattern = None
self.cursor = cursor
self.commit_url = commit_url
if pre_release_filter:
self.pre_release_filters = pre_release_filter.split(";")
else:
self.pre_release_filters = []
def __str__(self):
"""
Return a parsed, string version of this instance's version.
If parsing fails, the original version string is returned.
"""
try:
return str(self.parse())
except InvalidVersion:
return self.version
def parse(self):
"""
Parse the version string to an object representing the version.
This does some minimal string processing, stripping any prefix set on
project.
Returns:
str: The version string. Sub-classes may return a different type.
object: Sub-classes may return a special class that represents the
version. This must support comparison operations and return
a parsed, prefix-stripped version when ``__str__`` is invoked.
Raises:
InvalidVersion: If the version cannot be parsed.
"""
# If there's a prefix set on the project, strip it if it's present
version = self.version
for prefix in self.prefixes:
if prefix and self.version.startswith(prefix):
version = self.version[len(prefix) :].strip()
# Many projects prefix their tags with 'v', so strip it if it's present
if v_prefix.match(version):
version = version[1:]
return version
def prerelease(self):
"""
Check if a version is a pre-release version.
This basic version implementation does not have a concept of
pre-releases.
"""
return False
def postrelease(self):
"""
Check if a version is a post-release version.
This basic version implementation does not have a concept of
post-releases.
"""
return False
def newer(self, other_versions):
"""
Check a version against a list of other versions to see if it's newer.
Example:
>>> version = Version(version='1.1.0')
>>> version.newer([Version(version='1.0.0')])
True
>>> version.newer(['1.0.0', '0.0.1']) # You can pass strings!
True
>>> version.newer(['1.2.0', '2.0.1'])
False
Args:
other_versions (list): A list of version strings or Version
objects to check the `version` string against.
Returns:
bool: ``True`` if self is the newest version, ``False`` otherwise.
Raises:
InvalidVersion: if one or more of the version
strings provided cannot be parsed.
"""
if isinstance(other_versions, (Version, str)):
other_versions = [other_versions]
cast_versions = []
for version in other_versions:
if not isinstance(version, type(self)):
version = type(self)(version=version)
cast_versions.append(version)
return all([self.parse() > v.parse() for v in cast_versions])
def __lt__(self, other):
"""Support < comparison via objects returned from :meth:`parse`"""
try:
parsed_self = self.parse()
except InvalidVersion:
parsed_self = None
try:
parsed_other = other.parse()
except InvalidVersion:
parsed_other = None
# Handle the cases where one or both aren't parsable. Parsable versions
# always sort higher than unparsable versions.
if not parsed_self and not parsed_other:
return self.version.__lt__(other.version)
if not parsed_other:
return False
if not parsed_self:
return True
return parsed_self.__lt__(parsed_other)
def __eq__(self, other):
"""Support == comparison via objects returned from :meth:`parse`"""
try:
parsed_self = self.parse()
except InvalidVersion:
parsed_self = None
try:
parsed_other = other.parse()
except InvalidVersion:
parsed_other = None
if not parsed_self or not parsed_other:
return self.version.__eq__(other.version)
return parsed_self.__eq__(parsed_other)
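# A minimal usage sketch, assuming the module above is importable as
# anitya.lib.versions.base; the version strings are illustrative only.
from anitya.lib.versions.base import Version

v = Version(version="v1.1.0", prefix="release-")
print(str(v))                     # "1.1.0": parse() strips the leading 'v'
print(v.newer(["1.0.0", "0.9"]))  # True: newer than every listed version
print(Version(version="1.0") < Version(version="2.0"))  # True via total_ordering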
|
gpl-2.0
|
cgstudiomap/cgstudiomap
|
main/parts/geospatial/base_geoengine_demo/geo_npa.py
|
7
|
2713
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, api
from openerp.addons.base_geoengine import geo_model
from openerp.addons.base_geoengine import fields as geo_fields
class NPA(geo_model.GeoModel):
"""GEO OSV SAMPLE"""
_name = "dummy.zip"
priority = fields.Integer('Priority', default=100)
name = fields.Char('ZIP', size=64, index=True, required=True)
city = fields.Char('City', size=64, index=True, required=True)
the_geom = geo_fields.GeoMultiPolygon('NPA Shape')
total_sales = fields.Float(
compute='_get_ZIP_total_sales',
string='Spatial! Total Sales',
)
@api.multi
def _get_ZIP_total_sales(self):
"""Return the total of the invoiced sales for this npa"""
mach_obj = self.env['geoengine.demo.automatic.retailing.machine']
for rec in self:
res = mach_obj.geo_search(
domain=[],
geo_domain=[
('the_point',
'geo_intersect',
{'dummy.zip.the_geom': [('id', '=', rec.id)]})])
cursor = self.env.cr
if res:
cursor.execute("SELECT sum(total_sales) from"
" geoengine_demo_automatic_retailing_machine "
"where id in %s;",
(tuple(res),))
res = cursor.fetchone()
if res:
rec.total_sales = res[0] or 0.0
else:
rec.total_sales = 0.0
else:
rec.total_sales = 0.0
def name_get(self, cursor, uid, ids, context=None):
res = []
for r in self.browse(cursor, uid, ids):
res.append((r.id, u"%s %s" % (r.name, r.city)))
return res
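# A hypothetical usage sketch (illustrative record id), assuming it runs inside
# a model method of an Odoo/OpenERP 8 instance with this demo module installed.
npa = self.env['dummy.zip'].browse(1)
print(npa.total_sales)  # reading the field triggers _get_ZIP_total_sales above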
|
agpl-3.0
|
kawasaki2013/python-for-android-x86
|
python3-alpha/python3-src/Lib/test/test_compare.py
|
169
|
1666
|
import unittest
from test import support
class Empty:
def __repr__(self):
return '<Empty>'
class Cmp:
def __init__(self,arg):
self.arg = arg
def __repr__(self):
return '<Cmp %s>' % self.arg
def __eq__(self, other):
return self.arg == other
class Anything:
def __eq__(self, other):
return True
def __ne__(self, other):
return False
class ComparisonTest(unittest.TestCase):
set1 = [2, 2.0, 2, 2+0j, Cmp(2.0)]
set2 = [[1], (3,), None, Empty()]
candidates = set1 + set2
def test_comparisons(self):
for a in self.candidates:
for b in self.candidates:
if ((a in self.set1) and (b in self.set1)) or a is b:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
def test_id_comparisons(self):
# Ensure default comparison compares id() of args
L = []
for i in range(10):
L.insert(len(L)//2, Empty())
for a in L:
for b in L:
self.assertEqual(a == b, id(a) == id(b),
'a=%r, b=%r' % (a, b))
def test_ne_defaults_to_not_eq(self):
a = Cmp(1)
b = Cmp(1)
self.assertTrue(a == b)
self.assertFalse(a != b)
def test_issue_1393(self):
x = lambda: None
self.assertEqual(x, Anything())
self.assertEqual(Anything(), x)
y = object()
self.assertEqual(y, Anything())
self.assertEqual(Anything(), y)
def test_main():
support.run_unittest(ComparisonTest)
if __name__ == '__main__':
test_main()
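# A short illustrative sketch (separate from the test file): by default,
# Python 3 equality falls back to identity and __ne__ is the inverse of __eq__.
class Plain:
    pass

a, b = Plain(), Plain()
assert (a == b) is False             # distinct instances: unequal by identity
assert (a == a) is True
assert (a != b) is (not (a == b))    # default __ne__ derived from __eq__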
|
apache-2.0
|
chubbymaggie/angr
|
angr/procedures/cgc/deallocate.py
|
5
|
1798
|
import angr
import logging
l = logging.getLogger("angr.procedures.cgc.deallocate")
class deallocate(angr.SimProcedure):
#pylint:disable=arguments-differ
IS_SYSCALL = True
def run(self, addr, length): #pylint:disable=unused-argument
# return code (see deallocate() docs)
r = self.state.se.ite_cases((
(addr % 0x1000 != 0, self.state.cgc.EINVAL),
(length == 0, self.state.cgc.EINVAL),
(self.state.cgc.addr_invalid(addr), self.state.cgc.EINVAL),
(self.state.cgc.addr_invalid(addr + length), self.state.cgc.EINVAL),
), self.state.se.BVV(0, self.state.arch.bits))
if self.state.se.symbolic(addr):
l.warning("Concretizing symbolic address passed to deallocate to max_int")
addr = self.state.se.max_int(addr)
# convert the address into a page number
page_size = self.state.memory.mem._page_size
base_page_num = addr / page_size
if self.state.se.symbolic(length):
l.warning("Concretizing symbolic length passed to deallocate to max_int")
length = self.state.se.max_int(length)
aligned_length = ((length + 0xfff) / 0x1000) * 0x1000
# only add sinkholes and unmap on success
if self.state.se.max_int(r) == 0:
# shorten length
allowed_pages = 0
while allowed_pages * page_size < aligned_length and \
base_page_num + allowed_pages in self.state.memory.mem._pages:
allowed_pages += 1
if allowed_pages == 0:
return r
allowed_length = allowed_pages * page_size
self.state.cgc.add_sinkhole(addr, allowed_length)
self.state.memory.unmap_region(addr, allowed_length)
return r
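# Illustrative arithmetic only (not angr code): deallocate rounds the request
# up to whole 0x1000-byte pages, matching the aligned_length computation above.
page_size = 0x1000
length = 0x1801                                   # spans parts of two pages
aligned_length = ((length + 0xfff) // 0x1000) * 0x1000
assert aligned_length == 0x2000                   # two full pages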
|
bsd-2-clause
|
playpauseandstop/setman
|
setman/utils/importlib.py
|
1
|
1408
|
"""
Backported from `importlib <http://pypi.python.org/pypi/importlib>` library,
which itself backported from Python 3.x branch.
"""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
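# A minimal usage sketch, assuming this backport is importable as
# setman.utils.importlib; the relative example presumes a 'setman' package
# with a 'utils' submodule, as in this repository.
from setman.utils.importlib import import_module

json_mod = import_module('json')                    # absolute import
utils = import_module('.utils', package='setman')   # relative, anchored at setman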
|
bsd-3-clause
|
Passtechsoft/TPEAlpGen
|
blender/release/scripts/startup/bl_ui/properties_constraint.py
|
2
|
30186
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Panel
class ConstraintButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "constraint"
def draw_constraint(self, context, con):
layout = self.layout
box = layout.template_constraint(con)
if box:
# match enum type to our functions, avoids a lookup table.
getattr(self, con.type)(context, box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
@staticmethod
def space_template(layout, con, target=True, owner=True):
if target or owner:
split = layout.split(percentage=0.2)
split.label(text="Space:")
row = split.row()
if target:
row.prop(con, "target_space", text="")
if target and owner:
row.label(icon='ARROW_LEFTRIGHT')
if owner:
row.prop(con, "owner_space", text="")
@staticmethod
def target_template(layout, con, subtargets=True):
layout.prop(con, "target") # XXX limiting settings for only 'curves' or some type of object
if con.target and subtargets:
if con.target.type == 'ARMATURE':
layout.prop_search(con, "subtarget", con.target.data, "bones", text="Bone")
if hasattr(con, "head_tail"):
row = layout.row()
row.label(text="Head/Tail:")
row.prop(con, "head_tail", text="")
elif con.target.type in {'MESH', 'LATTICE'}:
layout.prop_search(con, "subtarget", con.target, "vertex_groups", text="Vertex Group")
@staticmethod
def ik_template(layout, con):
# only used for iTaSC
layout.prop(con, "pole_target")
if con.pole_target and con.pole_target.type == 'ARMATURE':
layout.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
if con.pole_target:
row = layout.row()
row.label()
row.prop(con, "pole_angle")
split = layout.split(percentage=0.33)
col = split.column()
col.prop(con, "use_tail")
col.prop(con, "use_stretch")
col = split.column()
col.prop(con, "chain_count")
def CHILD_OF(self, context, layout, con):
self.target_template(layout, con)
split = layout.split()
col = split.column()
col.label(text="Location:")
col.prop(con, "use_location_x", text="X")
col.prop(con, "use_location_y", text="Y")
col.prop(con, "use_location_z", text="Z")
col = split.column()
col.label(text="Rotation:")
col.prop(con, "use_rotation_x", text="X")
col.prop(con, "use_rotation_y", text="Y")
col.prop(con, "use_rotation_z", text="Z")
col = split.column()
col.label(text="Scale:")
col.prop(con, "use_scale_x", text="X")
col.prop(con, "use_scale_y", text="Y")
col.prop(con, "use_scale_z", text="Z")
row = layout.row()
row.operator("constraint.childof_set_inverse")
row.operator("constraint.childof_clear_inverse")
def TRACK_TO(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.label(text="To:")
row.prop(con, "track_axis", expand=True)
row = layout.row()
row.prop(con, "up_axis", text="Up")
row.prop(con, "use_target_z")
self.space_template(layout, con)
def IK(self, context, layout, con):
if context.object.pose.ik_solver == 'ITASC':
layout.prop(con, "ik_type")
getattr(self, 'IK_' + con.ik_type)(context, layout, con)
else:
# Standard IK constraint
self.target_template(layout, con)
layout.prop(con, "pole_target")
if con.pole_target and con.pole_target.type == 'ARMATURE':
layout.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
if con.pole_target:
row = layout.row()
row.prop(con, "pole_angle")
row.label()
split = layout.split()
col = split.column()
col.prop(con, "iterations")
col.prop(con, "chain_count")
col = split.column()
col.prop(con, "use_tail")
col.prop(con, "use_stretch")
layout.label(text="Weight:")
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop(con, "use_location", text="")
sub = row.row(align=True)
sub.active = con.use_location
sub.prop(con, "weight", text="Position", slider=True)
col = split.column()
row = col.row(align=True)
row.prop(con, "use_rotation", text="")
sub = row.row(align=True)
sub.active = con.use_rotation
sub.prop(con, "orient_weight", text="Rotation", slider=True)
def IK_COPY_POSE(self, context, layout, con):
self.target_template(layout, con)
self.ik_template(layout, con)
row = layout.row()
row.label(text="Axis Ref:")
row.prop(con, "reference_axis", expand=True)
split = layout.split(percentage=0.33)
split.row().prop(con, "use_location")
row = split.row()
row.prop(con, "weight", text="Weight", slider=True)
row.active = con.use_location
split = layout.split(percentage=0.33)
row = split.row()
row.label(text="Lock:")
row = split.row()
row.prop(con, "lock_location_x", text="X")
row.prop(con, "lock_location_y", text="Y")
row.prop(con, "lock_location_z", text="Z")
split.active = con.use_location
split = layout.split(percentage=0.33)
split.row().prop(con, "use_rotation")
row = split.row()
row.prop(con, "orient_weight", text="Weight", slider=True)
row.active = con.use_rotation
split = layout.split(percentage=0.33)
row = split.row()
row.label(text="Lock:")
row = split.row()
row.prop(con, "lock_rotation_x", text="X")
row.prop(con, "lock_rotation_y", text="Y")
row.prop(con, "lock_rotation_z", text="Z")
split.active = con.use_rotation
def IK_DISTANCE(self, context, layout, con):
self.target_template(layout, con)
self.ik_template(layout, con)
layout.prop(con, "limit_mode")
row = layout.row()
row.prop(con, "weight", text="Weight", slider=True)
row.prop(con, "distance", text="Distance", slider=True)
def FOLLOW_PATH(self, context, layout, con):
self.target_template(layout, con)
layout.operator("constraint.followpath_path_animate", text="Animate Path", icon='ANIM_DATA')
split = layout.split()
col = split.column()
col.prop(con, "use_curve_follow")
col.prop(con, "use_curve_radius")
col = split.column()
col.prop(con, "use_fixed_location")
if con.use_fixed_location:
col.prop(con, "offset_factor", text="Offset")
else:
col.prop(con, "offset")
row = layout.row()
row.label(text="Forward:")
row.prop(con, "forward_axis", expand=True)
row = layout.row()
row.prop(con, "up_axis", text="Up")
row.label()
def LIMIT_ROTATION(self, context, layout, con):
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_limit_x")
sub = col.column(align=True)
sub.active = con.use_limit_x
sub.prop(con, "min_x", text="Min")
sub.prop(con, "max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_y")
sub = col.column(align=True)
sub.active = con.use_limit_y
sub.prop(con, "min_y", text="Min")
sub.prop(con, "max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_z")
sub = col.column(align=True)
sub.active = con.use_limit_z
sub.prop(con, "min_z", text="Min")
sub.prop(con, "max_z", text="Max")
layout.prop(con, "use_transform_limit")
row = layout.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
def LIMIT_LOCATION(self, context, layout, con):
split = layout.split()
col = split.column()
col.prop(con, "use_min_x")
sub = col.column()
sub.active = con.use_min_x
sub.prop(con, "min_x", text="")
col.prop(con, "use_max_x")
sub = col.column()
sub.active = con.use_max_x
sub.prop(con, "max_x", text="")
col = split.column()
col.prop(con, "use_min_y")
sub = col.column()
sub.active = con.use_min_y
sub.prop(con, "min_y", text="")
col.prop(con, "use_max_y")
sub = col.column()
sub.active = con.use_max_y
sub.prop(con, "max_y", text="")
col = split.column()
col.prop(con, "use_min_z")
sub = col.column()
sub.active = con.use_min_z
sub.prop(con, "min_z", text="")
col.prop(con, "use_max_z")
sub = col.column()
sub.active = con.use_max_z
sub.prop(con, "max_z", text="")
row = layout.row()
row.prop(con, "use_transform_limit")
row.label()
row = layout.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
def LIMIT_SCALE(self, context, layout, con):
split = layout.split()
col = split.column()
col.prop(con, "use_min_x")
sub = col.column()
sub.active = con.use_min_x
sub.prop(con, "min_x", text="")
col.prop(con, "use_max_x")
sub = col.column()
sub.active = con.use_max_x
sub.prop(con, "max_x", text="")
col = split.column()
col.prop(con, "use_min_y")
sub = col.column()
sub.active = con.use_min_y
sub.prop(con, "min_y", text="")
col.prop(con, "use_max_y")
sub = col.column()
sub.active = con.use_max_y
sub.prop(con, "max_y", text="")
col = split.column()
col.prop(con, "use_min_z")
sub = col.column()
sub.active = con.use_min_z
sub.prop(con, "min_z", text="")
col.prop(con, "use_max_z")
sub = col.column()
sub.active = con.use_max_z
sub.prop(con, "max_z", text="")
row = layout.row()
row.prop(con, "use_transform_limit")
row.label()
row = layout.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
def COPY_ROTATION(self, context, layout, con):
self.target_template(layout, con)
split = layout.split()
col = split.column()
col.prop(con, "use_x", text="X")
sub = col.column()
sub.active = con.use_x
sub.prop(con, "invert_x", text="Invert")
col = split.column()
col.prop(con, "use_y", text="Y")
sub = col.column()
sub.active = con.use_y
sub.prop(con, "invert_y", text="Invert")
col = split.column()
col.prop(con, "use_z", text="Z")
sub = col.column()
sub.active = con.use_z
sub.prop(con, "invert_z", text="Invert")
layout.prop(con, "use_offset")
self.space_template(layout, con)
def COPY_LOCATION(self, context, layout, con):
self.target_template(layout, con)
split = layout.split()
col = split.column()
col.prop(con, "use_x", text="X")
sub = col.column()
sub.active = con.use_x
sub.prop(con, "invert_x", text="Invert")
col = split.column()
col.prop(con, "use_y", text="Y")
sub = col.column()
sub.active = con.use_y
sub.prop(con, "invert_y", text="Invert")
col = split.column()
col.prop(con, "use_z", text="Z")
sub = col.column()
sub.active = con.use_z
sub.prop(con, "invert_z", text="Invert")
layout.prop(con, "use_offset")
self.space_template(layout, con)
def COPY_SCALE(self, context, layout, con):
self.target_template(layout, con)
row = layout.row(align=True)
row.prop(con, "use_x", text="X")
row.prop(con, "use_y", text="Y")
row.prop(con, "use_z", text="Z")
layout.prop(con, "use_offset")
self.space_template(layout, con)
def MAINTAIN_VOLUME(self, context, layout, con):
row = layout.row()
row.label(text="Free:")
row.prop(con, "free_axis", expand=True)
layout.prop(con, "volume")
row = layout.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
def COPY_TRANSFORMS(self, context, layout, con):
self.target_template(layout, con)
self.space_template(layout, con)
#def SCRIPT(self, context, layout, con):
def ACTION(self, context, layout, con):
self.target_template(layout, con)
split = layout.split()
col = split.column()
col.label(text="From Target:")
col.prop(con, "transform_channel", text="")
col.prop(con, "target_space", text="")
col = split.column()
col.label(text="To Action:")
col.prop(con, "action", text="")
col.prop(con, "use_bone_object_action")
split = layout.split()
col = split.column(align=True)
col.label(text="Target Range:")
col.prop(con, "min", text="Min")
col.prop(con, "max", text="Max")
col = split.column(align=True)
col.label(text="Action Range:")
col.prop(con, "frame_start", text="Start")
col.prop(con, "frame_end", text="End")
def LOCKED_TRACK(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.label(text="To:")
row.prop(con, "track_axis", expand=True)
row = layout.row()
row.label(text="Lock:")
row.prop(con, "lock_axis", expand=True)
def LIMIT_DISTANCE(self, context, layout, con):
self.target_template(layout, con)
col = layout.column(align=True)
col.prop(con, "distance")
col.operator("constraint.limitdistance_reset")
row = layout.row()
row.label(text="Clamp Region:")
row.prop(con, "limit_mode", text="")
row = layout.row()
row.prop(con, "use_transform_limit")
row.label()
self.space_template(layout, con)
def STRETCH_TO(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.prop(con, "rest_length", text="Rest Length")
row.operator("constraint.stretchto_reset", text="Reset")
layout.prop(con, "bulge", text="Volume Variation")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_bulge_min", text="Volume Min")
sub = col.column()
sub.active = con.use_bulge_min
sub.prop(con, "bulge_min", text="")
col = split.column(align=True)
col.prop(con, "use_bulge_max", text="Volume Max")
sub = col.column()
sub.active = con.use_bulge_max
sub.prop(con, "bulge_max", text="")
col = layout.column()
col.active = con.use_bulge_min or con.use_bulge_max
col.prop(con, "bulge_smooth", text="Smooth")
row = layout.row()
row.label(text="Volume:")
row.prop(con, "volume", expand=True)
row.label(text="Plane:")
row.prop(con, "keep_axis", expand=True)
def FLOOR(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.prop(con, "use_sticky")
row.prop(con, "use_rotation")
layout.prop(con, "offset")
row = layout.row()
row.label(text="Min/Max:")
row.prop(con, "floor_location", expand=True)
self.space_template(layout, con)
def RIGID_BODY_JOINT(self, context, layout, con):
self.target_template(layout, con, subtargets=False)
layout.prop(con, "pivot_type")
layout.prop(con, "child")
row = layout.row()
row.prop(con, "use_linked_collision", text="Linked Collision")
row.prop(con, "show_pivot", text="Display Pivot")
split = layout.split()
col = split.column(align=True)
col.label(text="Pivot:")
col.prop(con, "pivot_x", text="X")
col.prop(con, "pivot_y", text="Y")
col.prop(con, "pivot_z", text="Z")
col = split.column(align=True)
col.label(text="Axis:")
col.prop(con, "axis_x", text="X")
col.prop(con, "axis_y", text="Y")
col.prop(con, "axis_z", text="Z")
if con.pivot_type == 'CONE_TWIST':
layout.label(text="Limits:")
split = layout.split()
col = split.column()
col.prop(con, "use_angular_limit_x", text="Angle X")
sub = col.column()
sub.active = con.use_angular_limit_x
sub.prop(con, "limit_angle_max_x", text="")
col = split.column()
col.prop(con, "use_angular_limit_y", text="Angle Y")
sub = col.column()
sub.active = con.use_angular_limit_y
sub.prop(con, "limit_angle_max_y", text="")
col = split.column()
col.prop(con, "use_angular_limit_z", text="Angle Z")
sub = col.column()
sub.active = con.use_angular_limit_z
sub.prop(con, "limit_angle_max_z", text="")
elif con.pivot_type == 'GENERIC_6_DOF':
layout.label(text="Limits:")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_limit_x", text="X")
sub = col.column(align=True)
sub.active = con.use_limit_x
sub.prop(con, "limit_min_x", text="Min")
sub.prop(con, "limit_max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_y", text="Y")
sub = col.column(align=True)
sub.active = con.use_limit_y
sub.prop(con, "limit_min_y", text="Min")
sub.prop(con, "limit_max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_z", text="Z")
sub = col.column(align=True)
sub.active = con.use_limit_z
sub.prop(con, "limit_min_z", text="Min")
sub.prop(con, "limit_max_z", text="Max")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_angular_limit_x", text="Angle X")
sub = col.column(align=True)
sub.active = con.use_angular_limit_x
sub.prop(con, "limit_angle_min_x", text="Min")
sub.prop(con, "limit_angle_max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_angular_limit_y", text="Angle Y")
sub = col.column(align=True)
sub.active = con.use_angular_limit_y
sub.prop(con, "limit_angle_min_y", text="Min")
sub.prop(con, "limit_angle_max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_angular_limit_z", text="Angle Z")
sub = col.column(align=True)
sub.active = con.use_angular_limit_z
sub.prop(con, "limit_angle_min_z", text="Min")
sub.prop(con, "limit_angle_max_z", text="Max")
elif con.pivot_type == 'HINGE':
layout.label(text="Limits:")
split = layout.split()
row = split.row(align=True)
col = row.column()
col.prop(con, "use_angular_limit_x", text="Angle X")
col = row.column()
col.active = con.use_angular_limit_x
col.prop(con, "limit_angle_min_x", text="Min")
col = row.column()
col.active = con.use_angular_limit_x
col.prop(con, "limit_angle_max_x", text="Max")
def CLAMP_TO(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.label(text="Main Axis:")
row.prop(con, "main_axis", expand=True)
layout.prop(con, "use_cyclic")
def TRANSFORM(self, context, layout, con):
self.target_template(layout, con)
layout.prop(con, "use_motion_extrapolate", text="Extrapolate")
col = layout.column()
col.row().label(text="Source:")
col.row().prop(con, "map_from", expand=True)
split = layout.split()
ext = "" if con.map_from == 'LOCATION' else "_rot" if con.map_from == 'ROTATION' else "_scale"
sub = split.column(align=True)
sub.label(text="X:")
sub.prop(con, "from_min_x" + ext, text="Min")
sub.prop(con, "from_max_x" + ext, text="Max")
sub = split.column(align=True)
sub.label(text="Y:")
sub.prop(con, "from_min_y" + ext, text="Min")
sub.prop(con, "from_max_y" + ext, text="Max")
sub = split.column(align=True)
sub.label(text="Z:")
sub.prop(con, "from_min_z" + ext, text="Min")
sub.prop(con, "from_max_z" + ext, text="Max")
col = layout.column()
row = col.row()
row.label(text="Source to Destination Mapping:")
# note: chr(187) is the Latin-1 right double angle quote, used here as an
# arrow. Blender's Text Editor can't open that character literally, so the
# hard-coded value is used instead.
row = col.row()
row.prop(con, "map_to_x_from", expand=False, text="")
row.label(text=" %s X" % chr(187))
row = col.row()
row.prop(con, "map_to_y_from", expand=False, text="")
row.label(text=" %s Y" % chr(187))
row = col.row()
row.prop(con, "map_to_z_from", expand=False, text="")
row.label(text=" %s Z" % chr(187))
split = layout.split()
col = split.column()
col.label(text="Destination:")
col.row().prop(con, "map_to", expand=True)
split = layout.split()
ext = "" if con.map_to == 'LOCATION' else "_rot" if con.map_to == 'ROTATION' else "_scale"
col = split.column()
col.label(text="X:")
sub = col.column(align=True)
sub.prop(con, "to_min_x" + ext, text="Min")
sub.prop(con, "to_max_x" + ext, text="Max")
col = split.column()
col.label(text="Y:")
sub = col.column(align=True)
sub.prop(con, "to_min_y" + ext, text="Min")
sub.prop(con, "to_max_y" + ext, text="Max")
col = split.column()
col.label(text="Z:")
sub = col.column(align=True)
sub.prop(con, "to_min_z" + ext, text="Min")
sub.prop(con, "to_max_z" + ext, text="Max")
self.space_template(layout, con)
def SHRINKWRAP(self, context, layout, con):
self.target_template(layout, con, False)
layout.prop(con, "distance")
layout.prop(con, "shrinkwrap_type")
if con.shrinkwrap_type == 'PROJECT':
row = layout.row(align=True)
row.prop(con, "project_axis", expand=True)
split = layout.split(percentage=0.4)
split.label(text="Axis Space:")
rowsub = split.row()
rowsub.prop(con, "project_axis_space", text="")
layout.prop(con, "project_limit")
def DAMPED_TRACK(self, context, layout, con):
self.target_template(layout, con)
row = layout.row()
row.label(text="To:")
row.prop(con, "track_axis", expand=True)
def SPLINE_IK(self, context, layout, con):
self.target_template(layout, con)
col = layout.column()
col.label(text="Spline Fitting:")
col.prop(con, "chain_count")
col.prop(con, "use_even_divisions")
col.prop(con, "use_chain_offset")
col = layout.column()
col.label(text="Chain Scaling:")
col.prop(con, "use_y_stretch")
col.prop(con, "use_curve_radius")
layout.prop(con, "xz_scale_mode")
if con.xz_scale_mode == 'VOLUME_PRESERVE':
layout.prop(con, "bulge", text="Volume Variation")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_bulge_min", text="Volume Min")
sub = col.column()
sub.active = con.use_bulge_min
sub.prop(con, "bulge_min", text="")
col = split.column(align=True)
col.prop(con, "use_bulge_max", text="Volume Max")
sub = col.column()
sub.active = con.use_bulge_max
sub.prop(con, "bulge_max", text="")
col = layout.column()
col.active = con.use_bulge_min or con.use_bulge_max
col.prop(con, "bulge_smooth", text="Smooth")
def PIVOT(self, context, layout, con):
self.target_template(layout, con)
if con.target:
col = layout.column()
col.prop(con, "offset", text="Pivot Offset")
else:
col = layout.column()
col.prop(con, "use_relative_location")
if con.use_relative_location:
col.prop(con, "offset", text="Relative Pivot Point")
else:
col.prop(con, "offset", text="Absolute Pivot Point")
col = layout.column()
col.prop(con, "rotation_range", text="Pivot When")
@staticmethod
def _getConstraintClip(context, con):
if not con.use_active_clip:
return con.clip
else:
return context.scene.active_clip
def FOLLOW_TRACK(self, context, layout, con):
clip = self._getConstraintClip(context, con)
row = layout.row()
row.prop(con, "use_active_clip")
row.prop(con, "use_3d_position")
sub = row.column()
sub.active = not con.use_3d_position
sub.prop(con, "use_undistorted_position")
col = layout.column()
if not con.use_active_clip:
col.prop(con, "clip")
row = col.row()
row.prop(con, "frame_method", expand=True)
if clip:
tracking = clip.tracking
col.prop_search(con, "object", tracking, "objects", icon='OBJECT_DATA')
tracking_object = tracking.objects.get(con.object, tracking.objects[0])
col.prop_search(con, "track", tracking_object, "tracks", icon='ANIM_DATA')
col.prop(con, "camera")
row = col.row()
row.active = not con.use_3d_position
row.prop(con, "depth_object")
layout.operator("clip.constraint_to_fcurve")
def CAMERA_SOLVER(self, context, layout, con):
layout.prop(con, "use_active_clip")
if not con.use_active_clip:
layout.prop(con, "clip")
layout.operator("clip.constraint_to_fcurve")
def OBJECT_SOLVER(self, context, layout, con):
clip = self._getConstraintClip(context, con)
layout.prop(con, "use_active_clip")
if not con.use_active_clip:
layout.prop(con, "clip")
if clip:
layout.prop_search(con, "object", clip.tracking, "objects", icon='OBJECT_DATA')
layout.prop(con, "camera")
row = layout.row()
row.operator("constraint.objectsolver_set_inverse")
row.operator("constraint.objectsolver_clear_inverse")
layout.operator("clip.constraint_to_fcurve")
def SCRIPT(self, context, layout, con):
layout.label("Blender 2.6 doesn't support python constraints yet")
class OBJECT_PT_constraints(ConstraintButtonsPanel, Panel):
bl_label = "Object Constraints"
bl_context = "constraint"
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
return (context.object)
def draw(self, context):
layout = self.layout
obj = context.object
if obj.type == 'ARMATURE' and obj.mode == 'POSE':
box = layout.box()
box.alert = True # XXX: this should apply to the box background
box.label(icon='INFO', text="Constraints for active bone do not live here")
box.operator("wm.properties_context_change", icon='CONSTRAINT_BONE',
text="Go to Bone Constraints tab...").context = 'BONE_CONSTRAINT'
else:
layout.operator_menu_enum("object.constraint_add", "type", text="Add Object Constraint")
for con in obj.constraints:
self.draw_constraint(context, con)
class BONE_PT_constraints(ConstraintButtonsPanel, Panel):
bl_label = "Bone Constraints"
bl_context = "bone_constraint"
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
return (context.pose_bone)
def draw(self, context):
layout = self.layout
layout.operator_menu_enum("pose.constraint_add", "type", text="Add Bone Constraint")
for con in context.pose_bone.constraints:
self.draw_constraint(context, con)
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
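# An illustrative standalone sketch of the dispatch idiom used by
# draw_constraint above: the constraint type enum (e.g. 'CHILD_OF') doubles as
# a method name, so getattr() replaces a manual lookup table. Names are made up.
class Dispatcher:
    def CHILD_OF(self, out):
        out.append("child-of UI")

    def draw(self, con_type, out):
        getattr(self, con_type)(out)

drawn = []
Dispatcher().draw('CHILD_OF', drawn)
assert drawn == ["child-of UI"]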
|
gpl-3.0
|
bala4901/odoo
|
addons/event/event.py
|
17
|
23223
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class event_type(osv.osv):
""" Event Type """
_name = 'event.type'
_description = __doc__
_columns = {
'name': fields.char('Event Type', size=64, required=True),
'default_reply_to': fields.char('Default Reply-To', size=64,help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one." ),
'default_email_event': fields.many2one('email.template','Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event"),
'default_email_registration': fields.many2one('email.template','Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event"),
'default_registration_min': fields.integer('Default Minimum Registration', help="It will select this default minimum value when you choose this event"),
'default_registration_max': fields.integer('Default Maximum Registration', help="It will select this default maximum value when you choose this event"),
}
_defaults = {
'default_registration_min': 0,
'default_registration_max': 0,
}
class event_event(osv.osv):
"""Event"""
_name = 'event.event'
_description = __doc__
_order = 'date_begin'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
date = record.date_begin.split(" ")[0]
date_end = record.date_end.split(" ")[0]
if date != date_end:
date += ' - ' + date_end
display_name = record.name + ' (' + date + ')'
res.append((record['id'], display_name))
return res
def copy(self, cr, uid, id, default=None, context=None):
""" Reset the state and the registrations while copying an event
"""
if not default:
default = {}
default.update({
'state': 'draft',
'registration_ids': False,
})
return super(event_event, self).copy(cr, uid, id, default=default, context=context)
def button_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
registration = self.pool.get('event.registration')
reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context)
for event_reg in registration.browse(cr,uid,reg_ids,context=context):
if event_reg.state == 'done':
raise osv.except_osv(_('Error!'),_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.") )
registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context)
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
def button_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def confirm_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
for event in self.browse(cr, uid, ids, context=context):
if event.email_confirmation_id:
#send reminder that will confirm the event for all the people that were already confirmed
reg_ids = register_pool.search(cr, uid, [
('event_id', '=', event.id),
('state', 'not in', ['draft', 'cancel'])], context=context)
register_pool.mail_user_confirm(cr, uid, reg_ids)
return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
def button_confirm(self, cr, uid, ids, context=None):
""" Confirm Event and send confirmation email to all register peoples
"""
return self.confirm_event(cr, uid, isinstance(ids, (int, long)) and [ids] or ids, context=context)
def _get_seats(self, cr, uid, ids, fields, args, context=None):
"""Get reserved, available, reserved but unconfirmed and used seats.
@return: Dictionary of function field values.
"""
keys = {'draft': 'seats_unconfirmed', 'open':'seats_reserved', 'done': 'seats_used'}
res = {}
for event_id in ids:
res[event_id] = {key:0 for key in keys.values()}
query = "SELECT state, sum(nb_register) FROM event_registration WHERE event_id = %s AND state IN ('draft','open','done') GROUP BY state"
for event in self.pool.get('event.event').browse(cr, uid, ids, context=context):
cr.execute(query, (event.id,))
reg_states = cr.fetchall()
for reg_state in reg_states:
res[event.id][keys[reg_state[0]]] = reg_state[1]
res[event.id]['seats_available'] = event.seats_max - \
(res[event.id]['seats_reserved'] + res[event.id]['seats_used']) \
if event.seats_max > 0 else None
return res
def _get_events_from_registrations(self, cr, uid, ids, context=None):
"""Get reserved, available, reserved but unconfirmed and used seats, of the event related to a registration.
@return: Dictionary of function field values.
"""
event_ids=set()
for registration in self.pool['event.registration'].browse(cr, uid, ids, context=context):
event_ids.add(registration.event_id.id)
return list(event_ids)
def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None):
"""This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids)
"""
register_pool = self.pool.get('event.registration')
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = False
curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)])
if curr_reg_id:
for reg in register_pool.browse(cr, uid, curr_reg_id, context=context):
if reg.state in ('open','done'):
res[event.id]= True
continue
return res
def _count_registrations(self, cr, uid, ids, field_name, arg, context=None):
return {
event.id: len(event.registration_ids)
for event in self.browse(cr, uid, ids, context=context)
}
_columns = {
'name': fields.char('Event Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}),
'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}),
'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}),
'seats_max': fields.integer('Maximum Avalaible Seats', oldname='register_max', help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'seats_min': fields.integer('Minimum Reserved Seats', oldname='register_min', help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'seats_reserved': fields.function(_get_seats, oldname='register_current', string='Reserved Seats', type='integer', multi='seats_reserved',
store={'event.registration': (_get_events_from_registrations, ['state'], 10),
'event.event': (lambda self, cr, uid, ids, c = {}: ids, ['seats_max', 'registration_ids'], 20)}),
'seats_available': fields.function(_get_seats, oldname='register_avail', string='Available Seats', type='integer', multi='seats_reserved',
store={'event.registration': (_get_events_from_registrations, ['state'], 10),
'event.event': (lambda self, cr, uid, ids, c = {}: ids, ['seats_max', 'registration_ids'], 20)}),
'seats_unconfirmed': fields.function(_get_seats, oldname='register_prospect', string='Unconfirmed Seat Reservations', type='integer', multi='seats_reserved',
store={'event.registration': (_get_events_from_registrations, ['state'], 10),
'event.event': (lambda self, cr, uid, ids, c = {}: ids, ['seats_max', 'registration_ids'], 20)}),
'seats_used': fields.function(_get_seats, oldname='register_attended', string='Number of Participations', type='integer', multi='seats_reserved',
store={'event.registration': (_get_events_from_registrations, ['state'], 10),
'event.event': (lambda self, cr, uid, ids, c = {}: ids, ['seats_max', 'registration_ids'], 20)}),
'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}),
'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')],
'Status', readonly=True, required=True,
help='If event is created, the status is \'Draft\'.If event is confirmed for the particular dates the status is set to \'Confirmed\'. If the event is over, the status is set to \'Done\'.If event is cancelled the status is set to \'Cancelled\'.'),
'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'),
'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help="If you set an email template, each participant will receive this email announcing the confirmation of the event."),
'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one."),
'address_id': fields.many2one('res.partner','Location', readonly=False, states={'done': [('readonly', True)]}),
'country_id': fields.related('address_id', 'country_id',
type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}, store=True),
'description': fields.html(
'Description', readonly=False, translate=True,
states={'done': [('readonly', True)]},
oldname='note'),
'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}),
'is_subscribed' : fields.function(_subscribe_fnc, type="boolean", string='Subscribed'),
'organizer_id': fields.many2one('res.partner', "Organizer"),
'count_registrations': fields.function(_count_registrations, type="integer", string="Registrations"),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'event.event', context=c),
'user_id': lambda obj, cr, uid, context: uid,
'organizer_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.partner_id.id,
'address_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.partner_id.id
}
def _check_seats_limit(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.seats_max and event.seats_available < 0:
return False
return True
_constraints = [
(_check_seats_limit, 'No more available seats.', ['registration_ids','seats_max']),
]
def subscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
user_pool = self.pool.get('res.users')
num_of_seats = int(context.get('ticket', 1))
user = user_pool.browse(cr, uid, uid, context=context)
curr_reg_ids = register_pool.search(cr, uid, [('user_id', '=', user.id), ('event_id', '=' , ids[0])])
#the subscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to subscribe
if not curr_reg_ids:
curr_reg_ids = [register_pool.create(cr, SUPERUSER_ID, {'event_id': ids[0] ,'email': user.email, 'name':user.name, 'user_id': user.id, 'nb_register': num_of_seats})]
else:
register_pool.write(cr, uid, curr_reg_ids, {'nb_register': num_of_seats}, context=context)
return register_pool.confirm_registration(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def unsubscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
#the unsubscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to unsubscribe
curr_reg_ids = register_pool.search(cr, SUPERUSER_ID, [('user_id', '=', uid), ('event_id', '=', ids[0])])
return register_pool.button_reg_cancel(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.date_end < event.date_begin:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! Closing Date cannot be set before Beginning Date.', ['date_end']),
]
def onchange_event_type(self, cr, uid, ids, type_event, context=None):
values = {}
if type_event:
type_info = self.pool.get('event.type').browse(cr,uid,type_event,context)
dic ={
'reply_to': type_info.default_reply_to,
'email_registration_id': type_info.default_email_registration.id,
'email_confirmation_id': type_info.default_email_event.id,
'seats_min': type_info.default_registration_min,
'seats_max': type_info.default_registration_max,
}
values.update(dic)
return values
def onchange_start_date(self, cr, uid, ids, date_begin=False, date_end=False, context=None):
res = {'value':{}}
if date_end:
return res
if date_begin and isinstance(date_begin, str):
date_begin = datetime.strptime(date_begin, "%Y-%m-%d %H:%M:%S")
date_end = date_begin + timedelta(hours=1)
res['value'] = {'date_end': date_end.strftime("%Y-%m-%d %H:%M:%S")}
return res
class event_registration(osv.osv):
"""Event Registration"""
_name= 'event.registration'
_description = __doc__
_inherit = ['mail.thread', 'ir.needaction_mixin']
_columns = {
'id': fields.integer('ID'),
'origin': fields.char('Source Document', size=124,readonly=True,help="Reference of the sales order which created the registration"),
'nb_register': fields.integer('Number of Participants', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'event_id': fields.many2one('event.event', 'Event', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)]}),
'create_date': fields.datetime('Creation Date' , readonly=True),
'date_closed': fields.datetime('Attended Date', readonly=True),
'date_open': fields.datetime('Registration Date', readonly=True),
'reply_to': fields.related('event_id','reply_to',string='Reply-to Email', type='char', size=128, readonly=True,),
'log_ids': fields.one2many('mail.message', 'res_id', 'Logs', domain=[('model','=',_name)]),
'event_end_date': fields.related('event_id','date_end', type='datetime', string="Event End Date", readonly=True),
'event_begin_date': fields.related('event_id', 'date_begin', type='datetime', string="Event Start Date", readonly=True),
'user_id': fields.many2one('res.users', 'User', states={'done': [('readonly', True)]}),
'company_id': fields.related('event_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection([('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Attended')], 'Status',
size=16, readonly=True),
'email': fields.char('Email', size=64),
'phone': fields.char('Phone', size=64),
'name': fields.char('Name', size=128, select=True),
}
_defaults = {
'nb_register': 1,
'state': 'draft',
}
_order = 'name, create_date desc'
def _check_seats_limit(self, cr, uid, ids, context=None):
for registration in self.browse(cr, uid, ids, context=context):
if registration.event_id.seats_max and \
registration.event_id.seats_available < (registration.state == 'draft' and registration.nb_register or 0):
return False
return True
_constraints = [
(_check_seats_limit, 'No more available seats.', ['event_id','nb_register','state']),
]
def do_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def confirm_registration(self, cr, uid, ids, context=None):
for reg in self.browse(cr, uid, ids, context=context or {}):
self.pool.get('event.event').message_post(cr, uid, [reg.event_id.id], body=_('New registration confirmed: %s.') % (reg.name or '', ),subtype="event.mt_event_registration", context=context)
self.message_post(cr, uid, reg.id, body=_('Event Registration confirmed.'), context=context)
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def registration_open(self, cr, uid, ids, context=None):
""" Open Registration
"""
res = self.confirm_registration(cr, uid, ids, context=context)
self.mail_user(cr, uid, ids, context=context)
return res
def button_reg_close(self, cr, uid, ids, context=None):
""" Close Registration
"""
if context is None:
context = {}
today = fields.datetime.now()
for registration in self.browse(cr, uid, ids, context=context):
if today >= registration.event_id.date_begin:
values = {'state': 'done', 'date_closed': today}
self.write(cr, uid, ids, values)
else:
raise osv.except_osv(_('Error!'), _("You must wait for the starting day of the event to do this action."))
return True
def button_reg_cancel(self, cr, uid, ids, context=None, *args):
return self.write(cr, uid, ids, {'state': 'cancel'})
def mail_user(self, cr, uid, ids, context=None):
"""
Send email to user with email_template when registration is done
"""
for registration in self.browse(cr, uid, ids, context=context):
if registration.event_id.state == 'confirm' and registration.event_id.email_confirmation_id.id:
self.mail_user_confirm(cr, uid, ids, context=context)
else:
template_id = registration.event_id.email_registration_id.id
if template_id:
mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)
return True
def mail_user_confirm(self, cr, uid, ids, context=None):
"""
Send email to user when the event is confirmed
"""
for registration in self.browse(cr, uid, ids, context=context):
template_id = registration.event_id.email_confirmation_id.id
if template_id:
mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)
return True
def onchange_contact_id(self, cr, uid, ids, contact, partner, context=None):
if not contact:
return {}
addr_obj = self.pool.get('res.partner')
contact_id = addr_obj.browse(cr, uid, contact, context=context)
return {'value': {
'email':contact_id.email,
'name':contact_id.name,
'phone':contact_id.phone,
}}
def onchange_partner_id(self, cr, uid, ids, part, context=None):
res_obj = self.pool.get('res.partner')
data = {}
if not part:
return {'value': data}
addr = res_obj.address_get(cr, uid, [part]).get('default', False)
if addr:
d = self.onchange_contact_id(cr, uid, ids, addr, part, context)
data.update(d['value'])
return {'value': data}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
vovojh/gem5
|
src/mem/slicc/ast/CheckAllocateStatementAST.py
|
91
|
2239
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class CheckAllocateStatementAST(StatementAST):
def __init__(self, slicc, variable):
        super(CheckAllocateStatementAST, self).__init__(slicc)
self.variable = variable
def __repr__(self):
return "[CheckAllocateStatementAst: %r]" % self.variable
def generate(self, code, return_type):
# FIXME - check the type of the variable
# Make sure the variable is valid
self.variable.var
def findResources(self, resources):
var = self.variable.var
res_count = int(resources.get(var, 0))
resources[var] = str(res_count + 1)
|
bsd-3-clause
|
basicthinker/Sexain-MemController
|
gem5-stable/src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py
|
91
|
2398
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["bit_scan",
"bit_test",
"bounds",
"compare",
"set_byte_on_condition",
"test"]
microcode = ""
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
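# Editor's note (sketch, not part of the original file): the exec/implicit
# relative import above is Python 2 only. An equivalent aggregation with
# importlib on Python 3 would look roughly like:
#
#   import importlib
#   for category in categories:
#       cat = importlib.import_module('.' + category, package=__package__)
#       microcode += cat.microcode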
|
apache-2.0
|
Pastafarians/linguine-python
|
linguine/ops/tfidf.py
|
4
|
2232
|
#!/usr/bin/env python
"""
Given a set of texts, calculates the TF-IDF for each word-text pair in the set.
Returns: A list of dictionaries containing a term, a corpus id, the term's importance in that corpus.
Given: A list of corpuses
"""
import math, nltk, re, pprint
from linguine.transaction_exception import TransactionException
class Tfidf:
def __init__(self):
# a list of (words-freq) pairs for each document
self.global_terms_in_doc = {}
# list to hold occurrences of terms across documents
self.global_term_freq = {}
self.num_docs = 0
def run(self, data):
self.num_docs = len(data)
try:
results = []
for corpus in data:
terms_in_doc = {}
tokens = corpus.tokenized_contents
for word in tokens:
if word in terms_in_doc:
terms_in_doc[word] += 1
else:
terms_in_doc[word] = 1
for (word, freq) in terms_in_doc.items():
#If the word appears in a doc, increment the # of docs containing the word by 1
if word in self.global_term_freq:
self.global_term_freq[word] += 1
else:
self.global_term_freq[word] = 1
self.global_terms_in_doc[corpus.id] = terms_in_doc
for corpus in data:
max_freq = 0
for (term, freq) in self.global_terms_in_doc[corpus.id].items():
if freq > max_freq:
max_freq = freq
for (term, freq) in self.global_terms_in_doc[corpus.id].items():
idf = math.log(float(self.num_docs) / float(1 + self.global_term_freq[term]))
tfidf = float(freq) / float(max_freq) * float(idf)
results.append({ 'corpus_id' : corpus.id, 'term' : term, 'importance' : tfidf })
return results
except LookupError:
raise TransactionException('NLTK \'Punkt\' Model not installed.', 500)
except TypeError:
raise TransactionException('Corpus contents does not exist.')
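# Worked example (editor's sketch, following the formulas used in run() above):
# with self.num_docs = 10, a term occurring in exactly one document
# (global_term_freq[term] == 1), appearing 3 times in that document whose most
# frequent term occurs 4 times (max_freq == 4):
#   idf   = log(10 / (1 + 1)) = log(5) ~= 1.609
#   tfidf = (3 / 4) * idf     ~= 1.207
# Rare terms therefore score higher than terms spread across many documents.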
|
mit
|
benbahrenburg/titanium_mobile
|
support/common/mako/_ast_util.py
|
63
|
25415
|
# -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def to_source(node, indent_with=' ' * 4):
"""
This function can convert a node tree back into python sourcecode. This
is useful for debugging purposes, especially if you're dealing with custom
asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGenerator(indent_with)
generator.visit(node)
return ''.join(generator.result)
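# Illustrative round trip (editor's sketch): parse a snippet and regenerate it.
# Note that visit_BinOp always parenthesizes, so the output is not byte-identical
# to the input:
#   >>> to_source(parse('x = 1 + 2'))
#   'x = (1 + 2)'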
def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
Some nodes require a line number and the column offset. Without that
information the compiler will abort the compilation. Because it can be
a dull task to add appropriate line numbers and column offsets when
adding new nodes this function can help. It copies the line number and
column offset of the parent node to the child nodes without this
information.
Unlike `copy_location` this works recursive and won't touch nodes that
already have a location information.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line numbers of all nodes by `n` if they have line number
attributes. This is useful to "move code" to a different location in a
file.
"""
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
# CPython 2.5 compat
if not hasattr(node, '_fields') or not node._fields:
return
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def get_fields(node):
"""Like `iter_fiels` but returns a dict."""
return dict(iter_fields(node))
def iter_child_nodes(node):
"""Iterate over all child nodes or a node."""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_child_nodes(node):
"""Like `iter_child_nodes` but returns a list."""
return list(iter_child_nodes(node))
def get_compile_mode(node):
"""
Get the mode for `compile` of a given node. If the node is not a `mod`
node (`Expression`, `Module` etc.) a `TypeError` is thrown.
"""
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s
def walk(node):
"""
Iterate over all nodes. This is useful if you only want to modify nodes in
place and don't care about the context or the order the nodes are returned.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
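# Illustrative NodeVisitor subclass (editor's sketch): count Name nodes in a tree.
#   class NameCounter(NodeVisitor):
#       def __init__(self):
#           self.names = 0
#       def visit_Name(self, node):
#           self.names += 1
#
#   counter = NameCounter()
#   counter.visit(parse('a + b * c'))
#   counter.names  # -> 3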
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
    Here is an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
            self.visit(item)
def visit_Import(self, node):
self.newline()
        self.write('import ')
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
        for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
|
apache-2.0
|
chubbymaggie/pmu-tools
|
parser/elf.py
|
4
|
3750
|
#!/usr/bin/python
# resolve ELF and DWARF symbol tables using elftools
#
# Copyright (c) 2013-2014, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from elftools.common.py3compat import maxint, bytes2str
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import util
import kernel
# global caches
open_files = dict()
resolved = dict()
symtables = dict()
lines = dict()
def build_line_table(dwarfinfo):
lines = []
for CU in dwarfinfo.iter_CUs():
lp = dwarfinfo.line_program_for_CU(CU)
prevstate = None
for entry in lp.get_entries():
if entry.state is None or entry.state.end_sequence:
continue
if prevstate:
lines.append((prevstate.address,
entry.state.address,
lp['file_entry'][prevstate.file - 1].name,
prevstate.line))
prevstate = entry.state
lines.sort()
return lines
def build_symtab(elffile):
syms = []
for section in elffile.iter_sections():
if isinstance(section, SymbolTableSection):
for nsym, sym in enumerate(section.iter_symbols()):
name = bytes2str(sym.name)
if not name:
continue
if sym.entry.st_info.type != 'STT_FUNC':
continue
end = sym['st_value'] + sym['st_size']
syms.append((sym['st_value'], end,
bytes2str(sym.name)))
syms.sort()
return syms
def find_elf_file(fn):
if fn in open_files:
elffile = open_files[fn]
else:
f = open(fn, 'rb')
elffile = ELFFile(f)
open_files[fn] = elffile
return elffile
def resolve_line(fn, ip):
elffile = find_elf_file(fn)
if fn not in lines and elffile.has_dwarf_info():
lines[fn] = build_line_table(elffile.get_dwarf_info())
src = None
if resolve_line and fn in lines:
pos = util.find_le(lines[fn], ip)
if pos:
src = "%s:%d" % (pos[2], pos[3])
return src
# global one hit cache
# helps a lot for LBR decoding
# tbd use a small list with LRU?
last_sym = None
def resolve_sym(fn, ip):
elffile = find_elf_file(fn)
global last_sym
if fn not in symtables:
symtables[fn] = build_symtab(elffile)
if last_sym and last_sym[0] <= ip <= last_sym[1]:
return last_sym[2], ip - last_sym[0]
loc = None
offset = None
if fn in symtables:
sym = util.find_le(symtables[fn], ip)
if sym:
loc, offset = sym[2], ip - sym[0]
return loc, offset
def resolve_ip(filename, foffset, ip, need_line):
sym, soffset, line = None, 0, None
if filename and filename.startswith("/"):
sym, soffset = resolve_sym(filename, foffset)
if not sym:
sym, soffset = resolve_sym(filename, ip)
if need_line:
line = resolve_line(filename, ip)
else:
sym, soffset = kernel.resolve_kernel(ip)
return sym, soffset, line
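# Illustrative use (editor's sketch; the path and addresses are placeholders):
#   sym, offset, line = resolve_ip('/usr/bin/ls', 0x4010, 0x4010, need_line=True)
#   if sym:
#       print "%s+%#x %s" % (sym, offset, line or "")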
if __name__ == '__main__':
import sys
    print resolve_sym(sys.argv[1], int(sys.argv[2], 16))
print resolve_line(sys.argv[1], int(sys.argv[2], 16))
|
gpl-2.0
|
nirmeshk/oh-mainline
|
vendor/packages/kombu/kombu/pools.py
|
38
|
3812
|
"""
kombu.pools
===========
Public resource pools.
"""
from __future__ import absolute_import
import os
from itertools import chain
from .connection import Resource
from .five import range, values
from .messaging import Producer
from .utils import EqualityDict
from .utils.functional import lazy
__all__ = ['ProducerPool', 'PoolGroup', 'register_group',
'connections', 'producers', 'get_limit', 'set_limit', 'reset']
_limit = [200]
_used = [False]
_groups = []
use_global_limit = object()
disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION')
class ProducerPool(Resource):
Producer = Producer
def __init__(self, connections, *args, **kwargs):
self.connections = connections
self.Producer = kwargs.pop('Producer', None) or self.Producer
super(ProducerPool, self).__init__(*args, **kwargs)
def _acquire_connection(self):
return self.connections.acquire(block=True)
def create_producer(self):
conn = self._acquire_connection()
try:
return self.Producer(conn)
except BaseException:
conn.release()
raise
def new(self):
return lazy(self.create_producer)
def setup(self):
if self.limit:
for _ in range(self.limit):
self._resource.put_nowait(self.new())
def close_resource(self, resource):
pass
def prepare(self, p):
if callable(p):
p = p()
if p._channel is None:
conn = self._acquire_connection()
try:
p.revive(conn)
except BaseException:
conn.release()
raise
return p
def release(self, resource):
if resource.__connection__:
resource.__connection__.release()
resource.channel = None
super(ProducerPool, self).release(resource)
class PoolGroup(EqualityDict):
def __init__(self, limit=None):
self.limit = limit
def create(self, resource, limit):
raise NotImplementedError('PoolGroups must define ``create``')
def __missing__(self, resource):
limit = self.limit
if limit is use_global_limit:
limit = get_limit()
if not _used[0]:
_used[0] = True
k = self[resource] = self.create(resource, limit)
return k
def register_group(group):
_groups.append(group)
return group
class Connections(PoolGroup):
def create(self, connection, limit):
return connection.Pool(limit=limit)
connections = register_group(Connections(limit=use_global_limit))
class Producers(PoolGroup):
def create(self, connection, limit):
return ProducerPool(connections[connection], limit=limit)
producers = register_group(Producers(limit=use_global_limit))
def _all_pools():
return chain(*[(values(g) if g else iter([])) for g in _groups])
def get_limit():
return _limit[0]
def set_limit(limit, force=False, reset_after=False):
limit = limit or 0
glimit = _limit[0] or 0
if limit < glimit:
if not disable_limit_protection and (_used[0] and not force):
raise RuntimeError("Can't lower limit after pool in use.")
reset_after = True
if limit != glimit:
_limit[0] = limit
for pool in _all_pools():
pool.limit = limit
if reset_after:
reset()
return limit
def reset(*args, **kwargs):
for pool in _all_pools():
try:
pool.force_close_all()
except Exception:
pass
for group in _groups:
group.clear()
_used[0] = False
try:
from multiprocessing.util import register_after_fork
register_after_fork(connections, reset)
except ImportError: # pragma: no cover
pass
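# Typical usage of the public pools (editor's sketch based on the documented
# kombu API; the broker URL and routing key are placeholders):
#   from kombu import Connection
#   from kombu.pools import producers
#
#   connection = Connection('amqp://guest:guest@localhost//')
#   with producers[connection].acquire(block=True) as producer:
#       producer.publish({'hello': 'world'}, routing_key='hello')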
|
agpl-3.0
|
pombredanne/pants
|
contrib/node/src/python/pants/contrib/node/targets/node_package.py
|
18
|
1308
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
class NodePackage(Target):
"""Represents an abstract Node package, currently handled by NPM internally.
All Node packages have a package name whether they are local or remote so this serves as a base
class for all concrete manifestations of packages.
"""
def __init__(self, package_name=None, address=None, payload=None, **kwargs):
"""
:param string package_name: The remote module package name, if not supplied the target name is
used.
"""
payload = payload or Payload()
payload.add_fields({
'package_name': PrimitiveField(package_name or address.target_name),
})
super(NodePackage, self).__init__(address=address, payload=payload, **kwargs)
@property
def package_name(self):
"""The name of the remote module package.
:rtype: string
"""
return self.payload.package_name
|
apache-2.0
|
johnkit/vtk-dev
|
ThirdParty/Twisted/twisted/internet/abstract.py
|
23
|
18684
|
# -*- test-case-name: twisted.test.test_abstract -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for generic select()able objects.
"""
from __future__ import division, absolute_import
from socket import AF_INET6, inet_pton, error
from zope.interface import implementer
# Twisted Imports
from twisted.python.compat import _PY3, unicode, lazyByteSlice
from twisted.python import reflect, failure
from twisted.internet import interfaces, main
if _PY3:
def _concatenate(bObj, offset, bArray):
# Python 3 lacks the buffer() builtin and the other primitives don't
# help in this case. Just do the copy. Perhaps later these buffers can
# be joined and FileDescriptor can use writev(). Or perhaps bytearrays
# would help.
return bObj[offset:] + b"".join(bArray)
else:
def _concatenate(bObj, offset, bArray):
# Avoid one extra string copy by using a buffer to limit what we include
# in the result.
return buffer(bObj, offset) + b"".join(bArray)
class _ConsumerMixin(object):
"""
L{IConsumer} implementations can mix this in to get C{registerProducer} and
C{unregisterProducer} methods which take care of keeping track of a
producer's state.
Subclasses must provide three attributes which L{_ConsumerMixin} will read
but not write:
      - connected: A C{bool} which is C{True} as long as the consumer has
someplace to send bytes (for example, a TCP connection), and then
C{False} when it no longer does.
- disconnecting: A C{bool} which is C{False} until something like
L{ITransport.loseConnection} is called, indicating that the send buffer
should be flushed and the connection lost afterwards. Afterwards,
C{True}.
- disconnected: A C{bool} which is C{False} until the consumer no longer
has a place to send bytes, then C{True}.
Subclasses must also override the C{startWriting} method.
@ivar producer: C{None} if no producer is registered, otherwise the
registered producer.
@ivar producerPaused: A flag indicating whether the producer is currently
paused.
@type producerPaused: L{bool}
@ivar streamingProducer: A flag indicating whether the producer was
registered as a streaming (ie push) producer or not (ie a pull
producer). This will determine whether the consumer may ever need to
pause and resume it, or if it can merely call C{resumeProducing} on it
when buffer space is available.
@ivar streamingProducer: C{bool} or C{int}
"""
producer = None
producerPaused = False
streamingProducer = False
def startWriting(self):
"""
Override in a subclass to cause the reactor to monitor this selectable
for write events. This will be called once in C{unregisterProducer} if
C{loseConnection} has previously been called, so that the connection can
actually close.
"""
raise NotImplementedError("%r did not implement startWriting")
def registerProducer(self, producer, streaming):
"""
Register to receive data from a producer.
This sets this selectable to be a consumer for a producer. When this
selectable runs out of data on a write() call, it will ask the producer
to resumeProducing(). When the FileDescriptor's internal data buffer is
filled, it will ask the producer to pauseProducing(). If the connection
is lost, FileDescriptor calls producer's stopProducing() method.
If streaming is true, the producer should provide the IPushProducer
interface. Otherwise, it is assumed that producer provides the
IPullProducer interface. In this case, the producer won't be asked to
pauseProducing(), but it has to be careful to write() data only when its
resumeProducing() method is called.
"""
if self.producer is not None:
raise RuntimeError(
"Cannot register producer %s, because producer %s was never "
"unregistered." % (producer, self.producer))
if self.disconnected:
producer.stopProducing()
else:
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
"""
Stop consuming data from a producer, without disconnecting.
"""
self.producer = None
if self.connected and self.disconnecting:
self.startWriting()
@implementer(interfaces.ILoggingContext)
class _LogOwner(object):
"""
Mixin to help implement L{interfaces.ILoggingContext} for transports which
have a protocol, the log prefix of which should also appear in the
transport's log prefix.
"""
def _getLogPrefix(self, applicationObject):
"""
Determine the log prefix to use for messages related to
C{applicationObject}, which may or may not be an
L{interfaces.ILoggingContext} provider.
@return: A C{str} giving the log prefix to use.
"""
if interfaces.ILoggingContext.providedBy(applicationObject):
return applicationObject.logPrefix()
return applicationObject.__class__.__name__
def logPrefix(self):
"""
Override this method to insert custom logging behavior. Its
return value will be inserted in front of every line. It may
be called more times than the number of output lines.
"""
return "-"
@implementer(
interfaces.IPushProducer, interfaces.IReadWriteDescriptor,
interfaces.IConsumer, interfaces.ITransport,
interfaces.IHalfCloseableDescriptor)
class FileDescriptor(_ConsumerMixin, _LogOwner):
"""
An object which can be operated on by select().
This is an abstract superclass of all objects which may be notified when
they are readable or writable; e.g. they have a file-descriptor that is
valid to be passed to select(2).
"""
connected = 0
disconnected = 0
disconnecting = 0
_writeDisconnecting = False
_writeDisconnected = False
dataBuffer = b""
offset = 0
SEND_LIMIT = 128*1024
def __init__(self, reactor=None):
"""
@param reactor: An L{IReactorFDSet} provider which this descriptor will
use to get readable and writeable event notifications. If no value
is given, the global reactor will be used.
"""
if not reactor:
from twisted.internet import reactor
self.reactor = reactor
self._tempDataBuffer = [] # will be added to dataBuffer in doWrite
self._tempDataLen = 0
def connectionLost(self, reason):
"""The connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
Clean up state here, but make sure to call back up to FileDescriptor.
"""
self.disconnected = 1
self.connected = 0
if self.producer is not None:
self.producer.stopProducing()
self.producer = None
self.stopReading()
self.stopWriting()
def writeSomeData(self, data):
"""
Write as much as possible of the given data, immediately.
This is called to invoke the lower-level writing functionality, such
as a socket's send() method, or a file's write(); this method
returns an integer or an exception. If an integer, it is the number
of bytes written (possibly zero); if an exception, it indicates the
connection was lost.
"""
raise NotImplementedError("%s does not implement writeSomeData" %
reflect.qual(self.__class__))
def doRead(self):
"""
Called when data is available for reading.
Subclasses must override this method. The result will be interpreted
in the same way as a result of doWrite().
"""
raise NotImplementedError("%s does not implement doRead" %
reflect.qual(self.__class__))
def doWrite(self):
"""
Called when data can be written.
@return: C{None} on success, an exception or a negative integer on
failure.
@see: L{twisted.internet.interfaces.IWriteDescriptor.doWrite}.
"""
if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
# If there is currently less than SEND_LIMIT bytes left to send
# in the string, extend it with the array data.
self.dataBuffer = _concatenate(
self.dataBuffer, self.offset, self._tempDataBuffer)
self.offset = 0
self._tempDataBuffer = []
self._tempDataLen = 0
# Send as much data as you can.
if self.offset:
l = self.writeSomeData(lazyByteSlice(self.dataBuffer, self.offset))
else:
l = self.writeSomeData(self.dataBuffer)
# There is no writeSomeData implementation in Twisted which returns
# < 0, but the documentation for writeSomeData used to claim negative
# integers meant connection lost. Keep supporting this here,
# although it may be worth deprecating and removing at some point.
if isinstance(l, Exception) or l < 0:
return l
self.offset += l
# If there is nothing left to send,
if self.offset == len(self.dataBuffer) and not self._tempDataLen:
self.dataBuffer = b""
self.offset = 0
# stop writing.
self.stopWriting()
# If I've got a producer who is supposed to supply me with data,
if self.producer is not None and ((not self.streamingProducer)
or self.producerPaused):
# tell them to supply some more.
self.producerPaused = False
self.producer.resumeProducing()
elif self.disconnecting:
# But if I was previously asked to let the connection die, do
# so.
return self._postLoseConnection()
elif self._writeDisconnecting:
# I was previously asked to half-close the connection. We
# set _writeDisconnected before calling handler, in case the
# handler calls loseConnection(), which will want to check for
# this attribute.
self._writeDisconnected = True
result = self._closeWriteConnection()
return result
return None
def _postLoseConnection(self):
"""Called after a loseConnection(), when all data has been written.
Whatever this returns is then returned by doWrite.
"""
# default implementation, telling reactor we're finished
return main.CONNECTION_DONE
def _closeWriteConnection(self):
# override in subclasses
pass
def writeConnectionLost(self, reason):
# in current code should never be called
self.connectionLost(reason)
def readConnectionLost(self, reason):
# override in subclasses
self.connectionLost(reason)
def _isSendBufferFull(self):
"""
Determine whether the user-space send buffer for this transport is full
or not.
When the buffer contains more than C{self.bufferSize} bytes, it is
considered full. This might be improved by considering the size of the
kernel send buffer and how much of it is free.
@return: C{True} if it is full, C{False} otherwise.
"""
return len(self.dataBuffer) + self._tempDataLen > self.bufferSize
def _maybePauseProducer(self):
"""
Possibly pause a producer, if there is one and the send buffer is full.
"""
# If we are responsible for pausing our producer,
if self.producer is not None and self.streamingProducer:
# and our buffer is full,
if self._isSendBufferFull():
# pause it.
self.producerPaused = True
self.producer.pauseProducing()
def write(self, data):
"""Reliably write some data.
The data is buffered until the underlying file descriptor is ready
for writing. If there is more than C{self.bufferSize} data in the
buffer and this descriptor has a registered streaming producer, its
C{pauseProducing()} method will be called.
"""
if isinstance(data, unicode): # no, really, I mean it
raise TypeError("Data must not be unicode")
if not self.connected or self._writeDisconnected:
return
if data:
self._tempDataBuffer.append(data)
self._tempDataLen += len(data)
self._maybePauseProducer()
self.startWriting()
def writeSequence(self, iovec):
"""
Reliably write a sequence of data.
Currently, this is a convenience method roughly equivalent to::
for chunk in iovec:
fd.write(chunk)
It may have a more efficient implementation at a later time or in a
different reactor.
As with the C{write()} method, if a buffer size limit is reached and a
streaming producer is registered, it will be paused until the buffered
data is written to the underlying file descriptor.
"""
for i in iovec:
if isinstance(i, unicode): # no, really, I mean it
raise TypeError("Data must not be unicode")
if not self.connected or not iovec or self._writeDisconnected:
return
self._tempDataBuffer.extend(iovec)
for i in iovec:
self._tempDataLen += len(i)
self._maybePauseProducer()
self.startWriting()
def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
"""Close the connection at the next available opportunity.
Call this to cause this FileDescriptor to lose its connection. It will
first write any data that it has buffered.
If there is data buffered yet to be written, this method will cause the
transport to lose its connection as soon as it's done flushing its
write buffer. If you have a producer registered, the connection won't
be closed until the producer is finished. Therefore, make sure you
unregister your producer when it's finished, or the connection will
never close.
"""
if self.connected and not self.disconnecting:
if self._writeDisconnected:
# doWrite won't trigger the connection close anymore
self.stopReading()
self.stopWriting()
self.connectionLost(_connDone)
else:
self.stopReading()
self.startWriting()
self.disconnecting = 1
def loseWriteConnection(self):
self._writeDisconnecting = True
self.startWriting()
def stopReading(self):
"""Stop waiting for read availability.
Call this to remove this selectable from being notified when it is
ready for reading.
"""
self.reactor.removeReader(self)
def stopWriting(self):
"""Stop waiting for write availability.
Call this to remove this selectable from being notified when it is ready
for writing.
"""
self.reactor.removeWriter(self)
def startReading(self):
"""Start waiting for read availability.
"""
self.reactor.addReader(self)
def startWriting(self):
"""Start waiting for write availability.
Call this to have this FileDescriptor be notified whenever it is ready for
writing.
"""
self.reactor.addWriter(self)
# Producer/consumer implementation
# first, the consumer stuff. This requires no additional work, as
# any object you can write to can be a consumer, really.
producer = None
bufferSize = 2**2**2**2
def stopConsuming(self):
"""Stop consuming data.
This is called when a producer has lost its connection, to tell the
consumer to go lose its connection (and break potential circular
references).
"""
self.unregisterProducer()
self.loseConnection()
# producer interface implementation
def resumeProducing(self):
if self.connected and not self.disconnecting:
self.startReading()
def pauseProducing(self):
self.stopReading()
def stopProducing(self):
self.loseConnection()
def fileno(self):
"""File Descriptor number for select().
This method must be overridden or assigned in subclasses to
indicate a valid file descriptor for the operating system.
"""
return -1
def isIPAddress(addr):
"""
Determine whether the given string represents an IPv4 address.
@type addr: C{str}
@param addr: A string which may or may not be the decimal dotted
representation of an IPv4 address.
@rtype: C{bool}
@return: C{True} if C{addr} represents an IPv4 address, C{False}
otherwise.
"""
dottedParts = addr.split('.')
if len(dottedParts) == 4:
for octet in dottedParts:
try:
value = int(octet)
except ValueError:
return False
else:
if value < 0 or value > 255:
return False
return True
return False
def isIPv6Address(addr):
"""
Determine whether the given string represents an IPv6 address.
@param addr: A string which may or may not be the hex
representation of an IPv6 address.
@type addr: C{str}
@return: C{True} if C{addr} represents an IPv6 address, C{False}
otherwise.
@rtype: C{bool}
"""
if '%' in addr:
addr = addr.split('%', 1)[0]
if not addr:
return False
try:
# This might be a native implementation or the one from
# twisted.python.compat.
inet_pton(AF_INET6, addr)
except (ValueError, error):
return False
return True
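# Illustrative results for the two helpers above (editor's sketch):
#   isIPAddress("192.168.0.1")     -> True
#   isIPAddress("256.0.0.1")       -> False  (octet out of range)
#   isIPAddress("::1")             -> False
#   isIPv6Address("::1")           -> True
#   isIPv6Address("fe80::1%eth0")  -> True   (the zone id is stripped first)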
__all__ = ["FileDescriptor", "isIPAddress", "isIPv6Address"]
|
bsd-3-clause
|
cboling/SDNdbg
|
docs/old-stuff/pydzcvr/doc/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py
|
17
|
4286
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nuage_floatingip
Revision ID: 2db5203cb7a9
Revises: 10cd28e692e9
Create Date: 2014-05-19 16:39:42.048125
"""
# revision identifiers, used by Alembic.
revision = '2db5203cb7a9'
down_revision = '10cd28e692e9'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# This migration will be executed only if the neutron DB schema contains
# the tables for the nuage plugin.
# This migration will be skipped when executed in offline mode.
@migration.skip_if_offline
def upgrade():
# These tables will be created even if the nuage plugin is not enabled.
# This is fine as they would be created anyway by the healing migration.
if migration.schema_has_table('routers'):
# In the database we are migrating from, the configured plugin
# did not create the routers table.
op.create_table(
'nuage_floatingip_pool_mapping',
sa.Column('fip_pool_id', sa.String(length=36), nullable=False),
sa.Column('net_id', sa.String(length=36), nullable=True),
sa.Column('router_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['net_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('fip_pool_id'),
)
if migration.schema_has_table('floatingips'):
# In the database we are migrating from, the configured plugin
# did not create the floatingips table.
op.create_table(
'nuage_floatingip_mapping',
sa.Column('fip_id', sa.String(length=36), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=True),
sa.Column('nuage_fip_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['fip_id'], ['floatingips.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('fip_id'),
)
migration.rename_table_if_exists('net_partitions',
'nuage_net_partitions')
migration.rename_table_if_exists('net_partition_router_mapping',
'nuage_net_partition_router_mapping')
migration.rename_table_if_exists('router_zone_mapping',
'nuage_router_zone_mapping')
migration.rename_table_if_exists('subnet_l2dom_mapping',
'nuage_subnet_l2dom_mapping')
migration.rename_table_if_exists('port_mapping',
'nuage_port_mapping')
migration.rename_table_if_exists('routerroutes_mapping',
'nuage_routerroutes_mapping')
@migration.skip_if_offline
def downgrade():
migration.drop_table_if_exists('nuage_floatingip_mapping')
migration.drop_table_if_exists('nuage_floatingip_pool_mapping')
migration.rename_table_if_exists('nuage_net_partitions', 'net_partitions')
migration.rename_table_if_exists('nuage_net_partition_router_mapping',
'net_partition_router_mapping')
migration.rename_table_if_exists('nuage_router_zone_mapping',
'router_zone_mapping')
migration.rename_table_if_exists('nuage_subnet_l2dom_mapping',
'subnet_l2dom_mapping')
migration.rename_table_if_exists('nuage_port_mapping', 'port_mapping')
migration.rename_table_if_exists('nuage_routerroutes_mapping',
'routerroutes_mapping')
|
apache-2.0
|
df8oe/UHSDR
|
mchf-eclipse/drivers/ui/lcd/edit-8x8-font.py
|
4
|
2343
|
# Tool to extract 8x8 font data, save to bitmap file, and apply modifications
# to source code after editing the bitmap.
from __future__ import print_function
from matplotlib.pyplot import imread, imsave, imshow, show
import numpy as np
import sys
# Where to find the font data - may need updated if code has changed.
source_file = 'ui_lcd_hy28_fonts.c'
start_marker = 'const uint8_t GL_ASCII8x8_Table [] ='
start_offset = 2 # Data starts this number of lines after start marker.
end_marker = '};' # Indicates end of font data.
# Image filename used in extract and insert modes.
image_file = 'font-8x8.png'
mode = None
if len(sys.argv) > 1:
mode = sys.argv[1]
if mode not in ('show', 'extract', 'insert'):
print("Usage: %s { show | extract | insert }" % sys.argv[0])
sys.exit(1)
# Get the literals used to populate the font array in the source file
lines = [line.rstrip() for line in open(source_file).readlines()]
start = lines.index(start_marker) + start_offset
end = start + lines[start:].index(end_marker)
data = str.join("", lines[start:end])
# Eval the literals to get the values into a numpy array
packed = eval("np.array([%s], np.uint8)" % data)
# Reorganise into a monochrome image, with the 96 8 x 8 characters
# laid out in 8 rows by 12 columns for easier viewing/editing
unpacked = np.unpackbits(packed)
bitmaps = unpacked.reshape(96, 8, 8)
indices = np.arange(96).reshape(8, 12)
image = np.block([[bitmaps[idx] for idx in row] for row in indices])
if mode == 'show':
# Display font image
imshow(image, cmap='binary')
show()
elif mode == 'extract':
# Save font image
imsave(image_file, image, format='png', cmap='binary')
elif mode == 'insert':
# Read in modified font image
image = imread(image_file)[:,:,0].astype(bool)
# Reorganise back to original order
bitmaps = np.vstack([np.vstack(np.split(row, 12, 1))
for row in np.split(image, 8)])
unpacked = bitmaps.reshape(-1)
packed = ~np.packbits(unpacked)
# Replace lines of file in same format as used before
grouped = packed.reshape(-1, 8)
for i, group in enumerate(grouped):
line = (" " + " 0x%02x," * 8) % tuple(group)
lines[start + i] = line
# Write out modified source file
open(source_file, 'w').writelines([line + "\n" for line in lines])
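# The pack/unpack round trip used above, in miniature (editor's sketch):
#   row = np.array([0x3c], np.uint8)   # one glyph row: 00111100
#   bits = np.unpackbits(row)          # -> array([0, 0, 1, 1, 1, 1, 0, 0], dtype=uint8)
#   np.packbits(bits)                  # -> array([60], dtype=uint8), i.e. 0x3c again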
|
gpl-3.0
|
pducks32/intergrala
|
python/sympy/sympy/galgebra/ncutil.py
|
25
|
14339
|
# sympy/galgebra/ncutil.py
"""
ncutil.py contains all the needed utility functions that only depend on
SymPy and that are required for the expansion and manipulation of linear
combinations of noncommutative SymPy symbols.
also contains "half_angle_reduce" which is probably not needed any more
due to the improvements in trigsimp.
"""
from sympy import expand, Mul, Add, Symbol, S, Pow, diff, trigsimp, \
simplify, sin, cos, symbols
try:
from numpy import matrix
numpy_loaded = True
except ImportError:
numpy_loaded = False
ONE_NC = Symbol('ONE', commutative=False)
def get_commutative_coef(expr):
if isinstance(expr, Mul):
(coefs, bases) = expr.args_cnc()
return Mul(*coefs)
return S.One
def half_angle_reduce(expr, theta):
s, c = symbols('s c')
sub_dict = {sin(theta / 2): s, cos(theta / 2): c}
new_expr = expr.subs(sub_dict)
sub_dict = {s * c: sin(theta) / 2, s**2: (1 - cos(theta)) / 2, c**2: (1 + cos(theta)) / 2}
# print new_expr
new_expr = trigsimp(simplify(new_expr.subs(sub_dict)), recursive=True)
# print expand(new_expr)
return new_expr
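# Illustrative reductions (editor's sketch; the exact output form may vary with
# the SymPy version and its simplification heuristics):
#   theta = Symbol('theta')
#   half_angle_reduce(sin(theta / 2) * cos(theta / 2), theta)   # -> sin(theta)/2
#   half_angle_reduce(cos(theta / 2) ** 2, theta)               # -> cos(theta)/2 + 1/2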
def linear_expand(expr):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
(expr_0, ..., expr_n) and (1, a_1, ..., a_n) are returned. Note that
expr_j*a_j does not have to be of that form, but rather can be any
Mul with a_j as a factor (it does not have to be a postmultiplier).
expr_0 is the scalar part of the expression.
"""
expr = expand(expr)
if expr.is_commutative: # commutative expr only contains expr_0
return (expr, ), (S.One, )
if isinstance(expr, Mul): # expr only contains one term
(coefs, bases) = expr.args_cnc()
coefs = Mul(*coefs)
bases = bases[0]
elif isinstance(expr, Symbol): # term is Symbol
coefs = S.One
bases = expr
elif isinstance(expr, Add): # expr has multiple terms
coefs = []
bases = []
for arg in expr.args:
term = arg.args_cnc()
coef = Mul(*term[0])
base = term[1][0]
if base in bases: # increment coefficient of base
ibase = list(bases).index(base) # Python 2.5
coefs[ibase] += coef
else: # add base to list
coefs.append(coef)
bases.append(base)
else:
raise NotImplementedError("linear_expand for type %s" % type(expr))
if not isinstance(coefs, list): # convert single coef to list
coefs = [coefs]
if not isinstance(bases, list): # convert single base to list
bases = [bases]
coefs = tuple(coefs)
bases = tuple(bases)
return coefs, bases
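# Illustrative example (hypothetical symbols, not part of the original source):
#   x = Symbol('x'); a1, a2 = symbols('a1 a2', commutative=False)
#   linear_expand(2*a1 + 3*x*a2)  ->  coefs (2, 3*x) and bases (a1, a2), up to ordering.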
def linear_projection(expr, plist=None):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
proj(expr) returns the sum of those terms where a_j is in plist
"""
if expr.is_commutative and plist is None: # return scalar projection
return expr
expr = expand(expr)
if isinstance(expr, Mul): # expr has single term
(coefs, bases) = expr.args_cnc()
if bases[0] in plist: # vector term to be projected
return Mul(*coefs) * bases[0]
else:
return S.Zero
elif isinstance(expr, Symbol): # base vector to be projected
if expr in plist:
return expr
else:
return S.Zero
elif isinstance(expr, Add): # expr has multiple terms
result = S.Zero
for arg in expr.args:
term = arg.args_cnc()
if term[1] == [] and plist is None: # scalar term to be projected
result += Mul(*term[0])
elif term[1] != [] and plist is not None and term[1][0] in plist: # vector term to be projected
result += Mul(*term[0]) * term[1][0]
return result
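# Illustrative example (hypothetical symbols): with a1, a2 noncommutative,
#   linear_projection(2*a1 + 3*a2 + 5, plist=[a1])  ->  2*a1
# With plist=None only the scalar part (here 5) is returned.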
def non_scalar_projection(expr):
"""
If a sympy 'Expr' is of the form:
expr = expr_0*S.One + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
non_scalar_projection(expr) returns the sum of those terms whose a_j is not ONE_NC, i.e. the non-scalar part
"""
if expr.is_commutative: # return scalar projection
return S.Zero
expr = expand(expr)
if isinstance(expr, Mul): # expr has single term
(coefs, bases) = expr.args_cnc()
if bases[0] != ONE_NC: # vector term to be projected
return Mul(*coefs) * bases[0]
else:
return S.Zero
elif isinstance(expr, Symbol): # base vector to be projected
if expr != ONE_NC:
return expr
else:
return S.Zero
elif isinstance(expr, Add): # expr has multiple terms
result = S.Zero
for arg in expr.args:
term = arg.args_cnc()
if term[1] and term[1][0] != ONE_NC: # vector term to be projected
result += Mul(*term[0]) * term[1][0]
return result
def nc_substitue(expr, sub_dict):
(coefs, bases) = linear_expand(expr)
result = S.Zero
for (coef, base) in zip(coefs, bases):
if base != 1:
result += coef * sub_dict[base]
return result
def linear_function(expr, fct):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
f(expr) = expr_0 + expr_1*f(a_1) + ... + expr_n*f(a_n)
is returned
"""
if expr.is_commutative:
return expr
expr = expand(expr)
if isinstance(expr, Mul):
(coefs, bases) = expr.args_cnc()
return Mul(*coefs) * fct(bases[0])
elif isinstance(expr, Symbol):
return fct(expr)
elif isinstance(expr, Add):
result = S.Zero
for arg in expr.args:
term = arg.args_cnc()
if term[1] == []:
result += Mul(*term[0])
else:
result += Mul(*term[0]) * fct(term[1][0])
return result
def coef_function(expr, fct):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
f(expr) = fct(expr_0) + fct(expr_1)*a_1 + ... + fct(expr_n)*a_n
is returned
"""
expr = expand(expr)
if isinstance(expr, Mul):
(coefs, bases) = expr.args_cnc()
return fct(Mul(*coefs)) * bases[0]
elif isinstance(expr, Symbol):
if expr.is_commutative:
return fct(expr)
else:
return expr
elif isinstance(expr, Add):
result = S.Zero
for arg in expr.args:
term = arg.args_cnc()
if term[1] == []:
result += fct(Mul(*term[0]))
else:
result += fct(Mul(*term[0])) * fct(term[1][0])
return result
def bilinear_product(expr, fct):
"""
If a sympy 'Expr' is of the form:
expr = expr_ij*a_i*a_j or expr_0 or expr_i*a_i
where all the a_i are noncommuting symbols in basis and the expr's
are commuting expressions then
bilinear_product(expr) = expr_ij*fct(a_i, a_j)
bilinear_product(expr_0) = expr_0
bilinear_product(expr_i*a_i) = expr_i*a_i
"""
def bilinear_term(expr, fct):
if expr.is_zero:
return expr
if isinstance(expr, Mul): # bases in expr
(coefs, bases) = expr.args_cnc()
coef = Mul(*tuple(coefs))
if isinstance(bases[0], Pow): # base is a_i**2
args = bases[0].args
return coef * fct(args[0], args[0])
elif len(bases) == 1: # base is a_i
return expr
else: # base is a_i*a_j
return coef * fct(bases[0], bases[1])
elif isinstance(expr, Pow): # expr is a_i*a_i
args = expr.args
return fct(args[0], args[0])
elif isinstance(expr, Symbol):
return expr
else:
raise TypeError('!!!!Cannot compute bilinear_product for ' + str(expr) + '!!!!\n')
expr = expand(expand(expr))
if not isinstance(expr, Add):
return bilinear_term(expr, fct)
else:
result = S.Zero
for term in expr.args:
tmp = bilinear_term(term, fct)
result += tmp
return result
def multilinear_product(expr, fct):
"""
If a sympy 'Expr' is of the form:
expr = expr_i1i2...ir*a_i1*a_i2*...*a_ir or expr_0
where all the a_i are noncommuting symbols in basis and the expr's
are commuting expressions then
multilinear_product(expr) = expr_i1i2...ir*fct(a_i1, a_i2, ..., a_ir)
multilinear_product(expr_0) = expr_0
where fct() is defined for r <= n, with n the total number of bases
"""
if expr.is_commutative: # no bases in expr
return expr
if isinstance(expr, Mul): # bases in expr
(coefs, bases) = expr.args_cnc()
if len(coefs) == 0: # expr_ij = 1
coefs = [S.One]
coef = Mul(*tuple(coefs))
new_bases = []
for base in bases:
if isinstance(base, Pow):
args = base.args
new_bases += args[1] * [args[0]]
else:
new_bases.append(base)
return coef * fct(new_bases)
def bilinear_function(expr, fct):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n + expr_11*a_1*a_1
+ ... + expr_ij*a_i*a_j + ... + expr_nn*a_n*a_n
where all the a_j are noncommuting symbols in basis then
bilinear_function(expr) = bilinear_product(expr_0) + bilinear_product(expr_1*a_1) + ... + bilinear_product(expr_n*a_n)
+ bilinear_product(expr_11*a_1*a_1) + ... + bilinear_product(expr_nn*a_n*a_n)
"""
if expr.is_commutative:
return expr
expr = expand(expr)
if isinstance(expr, (Mul, Pow, Symbol)): # only one additive term
return bilinear_product(expr, fct)
elif isinstance(expr, Add): # multiple additive terms
result = S.Zero
for arg in expr.args:
result += bilinear_product(arg, fct)
return result
def multilinear_function(expr, fct):
"""
If a sympy 'Expr' is of the form (summation convention):
expr = expr_0 + Sum{0 < r <= n}{expr_i1i2...ir*a_i1*a_i2*...*a_ir}
where all the a_j are noncommuting symbols in the basis and the
dimension of the basis is n, then
multilinear_function(expr) = multilinear_product(expr_0)
+ Sum{0<r<=n}multilinear_product(expr_i1i2...ir*a_i1*a_i2*...*a_ir)
"""
if expr.is_commutative:
return expr
expr = expand(expr)
if isinstance(expr, (Mul, Pow, Symbol)): # only one additive term
return bilinear_product(expr, fct)
elif isinstance(expr, Add): # multiple additive terms
result = S.Zero
for arg in expr.args:
result += bilinear_product(arg, fct)
return result
def linear_derivation(expr, fct, x):
"""
If a sympy 'Expr' is of the form:
expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
where all the a_j are noncommuting symbols in basis then
linear_derivation(expr) = diff(expr_0, x) + diff(expr_1, x)*a_1 + ...
+ diff(expr_n, x)*a_n + expr_1*fct(a_1, x) + ...
+ expr_n*fct(a_n, x)
"""
if expr.is_commutative:
return diff(expr, x)
expr = expand(expr)
if isinstance(expr, Mul):
(coefs, bases) = expr.args_cnc()
coef = Mul(*coefs)
return diff(coef, x) * bases[0] + coef * fct(bases[0], x)
elif isinstance(expr, Symbol):
return fct(expr, x)
elif isinstance(expr, Add):
result = S.Zero
for arg in expr.args:
term = arg.args_cnc()
coef = Mul(*term[0])
if term[1] == []:
result += diff(coef, x)
else:
result += diff(coef, x) * term[1][0] + coef * fct(term[1][0], x)
return result
def product_derivation(F, fct, x):
"""
If a sympy 'Expr' is of the form:
expr = expr_0*a_1*...*a_n
where all the a_j are noncommuting symbols in basis then
product_derivation(expr) = diff(expr_0, x)*a_1*...*a_n
+ expr_0*(fct(a_1, x)*a_2*...*a_n + ...
+ a_1*...*a_(i-1)*fct(a_i, x)*a_(i + 1)*...*a_n + ...
+ a_1*...*a_(n-1)*fct(a_n, x))
"""
if F.is_commutative:
return diff(F, x)
elif isinstance(F, Mul):
(coefs, bases) = F.args_cnc()
coef = Mul(*coefs)
dcoef = diff(coef, x)
if len(bases) == 1:
return dcoef * bases[0] + coef * fct(bases[0], x)
else:
result = dcoef * Mul(*bases)
for ib in range(len(bases)):
result += coef * Mul(*bases[:ib]) * fct(bases[ib], x) * Mul(*bases[ib + 1:])
return result
elif isinstance(F, Symbol):
return fct(F, x)
def multilinear_derivation(F, fct, x):
"""
If a sympy 'Expr' is of the form (summation convention):
expr = expr_0 + expr_i1i2...ir*a_i1*...*a_ir
where all the a_j are noncommuting symbols in basis then
dexpr = diff(expr_0, x) + d(expr_i1i2...ir*a_i1*...*a_ir)
is returned where d() is the product derivation
"""
if F.is_commutative:
return diff(F, x)
elif isinstance(F, Mul) or isinstance(F, Symbol):
return product_derivation(F, fct, x)
elif isinstance(F, Add):
result = S.Zero
for term in F.args:
result += product_derivation(term, fct, x)
return result
def numpy_matrix(M):
if not numpy_loaded:
raise ImportError('Cannot use "numpy_matrix" since "numpy" is not loaded')
Mlst = M.tolist()
nrows = len(Mlst)
ncols = len(Mlst[0])
for irow in range(nrows):
for icol in range(ncols):
try:
Mlst[irow][icol] = float(Mlst[irow][icol])
except ValueError:
raise TypeError('In Matrix:\n%s\nCannot convert %s to python float.' % (M, Mlst[irow][icol]))
return matrix(Mlst)
|
mit
|
authman/Python201609
|
Nguyen_Ken/Assignments/Flask/registration_form/server.py
|
1
|
1721
|
from flask import Flask, render_template, request, redirect, session, flash
import re
app = Flask(__name__)
app.secret_key = 'secretsquirrel'
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
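# For reference: EMAIL_REGEX.match('user@example.com') succeeds, while
# EMAIL_REGEX.match('not-an-email') returns None.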
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
session['email'] = request.form['email']
session['first_name'] = request.form['first_name']
session['last_name'] = request.form['last_name']
session['password'] = request.form['password1']
session['confirm_password'] = request.form['password2']
if len(session['email']) < 1:
flash('Please enter your email', 'error')
elif not EMAIL_REGEX.match(session['email']):
flash('That is not a valid email address', 'error')
elif len(session['first_name']) < 1:
flash('Please enter your first name', 'error')
elif not session['first_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['last_name']) < 1:
flash('Please enter your last name', 'error')
elif not session['last_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['password']) < 1:
flash('Please enter a password', 'error')
elif len(session['password']) < 8:
flash('Your password must be at least 8 characters long', 'error')
elif not session['confirm_password'] == session['password']:
flash('Your passwords do not match!', 'error')
else:
flash('Thanks for submitting your information', 'success')
return redirect('/')
app.run(debug=True)
|
mit
|
creative-quant/voltdb
|
tests/scripts/examples/sql_coverage/all-config.py
|
7
|
15894
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# HACK:
# This SQL coverage configuration set represents hopefully the largest
# set of statements that should always pass. Some of the template files
# generate statements that result in repeated failures due to functional
# differences between HSQL and VoltDB backends. In such cases, we work around
# these errors by generating a fixed sample query file and, after culling out
# any statements that cause mismatches, using it to replace the original
# template file name in the configuration list below.
# In this way, the sample file gets used as a trivial template file that passes
# through the generator untouched. It also has an unfortunate side-effect of
# causing any future improvements to the template to be ignored unless/until
# the sample file is manually re-generated from it and re-edited to eliminate
# mismatches.
#
# Actually, in the specific case of templates that generate random integer
# constant timestamp values, the template must be replaced by TWO separate
# generated sample files -- one generated for hsql with millisecond constants
# and one for VoltDB with microsecond constants (always 1000 X the hsql values).
# The hsql version of the sample file gets associated with the optional
# "template-hsqldb" key in the configuration. Otherwise, both hsql and VoltDB
# use the same input file associated with the "template" key.
#
# The generated sample files follow a naming convention of starting with
# "regression". The hsql variants end in "-hsql.sql".
# It is NOT advisable to try to edit these sample files directly.
# It is better to edit the original template, re-generate the sample(s),
# and re-cull the resulting "mismatches" -- being careful to cull ONLY
# mismatches that are NOT accountable to the known backend differences that
# we are working around in this way. It helps to have comments, below,
# to describe which specific issues each "regression" file is intended to
# work around.
#
# To regenerate the regression-*.sql file for a configuration, run the SQLGenerateReport.py
# tool on the report.xml file generated for that configuration, using the -f true switch,
# which will cause the successful statements to be written to stdout.
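# For example, something along these lines (illustrative only -- the exact argument
# syntax may differ, check the tool's usage/help):
#   SQLGenerateReport.py -f true <path-to-report.xml> > regression-basic-ints.sql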
{
# from regression-config.py
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "basic.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-unions": {"schema": "union-schema.py",
"ddl": "DDL.sql",
"template": "basic-unions.sql",
"normalizer": "normalizer.py"},
"mixed-unions": {"schema": "union-schema.py",
"ddl": "DDL.sql",
"template": "mixed-unions.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-index": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "basic.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-strings": {"schema": "strings-schema.py",
"ddl": "strings-DDL.sql",
"template": "basic-strings.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"partial-covering": {"schema": "partial-covering-schema.py",
"ddl": "partial-covering-DDL.sql",
"template": "partial-covering.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-basic-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "regression-basic-ints.sql",
"normalizer": "normalizer.py"},
# HSQL HAS BAD DEFAULT PRECISION
# AND VoltDB gives VOLTDB ERROR: Type DECIMAL can't be cast as FLOAT
# AND HSQLDB backend gives the likes of:
# VOLTDB ERROR: UNEXPECTED FAILURE: org.voltdb.ExpectedProcedureException:
# HSQLDB Backend DML Error (Scale of 56.11063569750000000000000000000000 is 32 and the max is 12)
# USE REGRESSION INPUT
"regression-basic-decimal": {"schema": "decimal-schema.py",
"ddl": "DDL.sql",
"template": "regression-basic-decimal.sql",
"normalizer": "normalizer.py"},
# If the ONLY problem was that HSQL HAS BAD DEFAULT PRECISION, we could use the original template input
# and FUZZY MATCHING, instead.
# Enable this test to investigate the "DECIMAL can't be cast as FLOAT" and/or "Backend DML Error" issues
# without being thrown off by HSQL HAS BAD DEFAULT PRECISION issues.
# "basic-decimal-fuzzy": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "basic-decimal.sql",
# "normalizer": "fuzzynormalizer.py"},
#
# FLOATING POINT ROUNDING ISSUES BETWEEN VOLT AND HSQL, USE FUZZY MATCHING
"basic-timestamp": {"schema": "timestamp-schema.py",
"ddl": "DDL.sql",
"template": "basic-timestamp.sql",
"normalizer": "normalizer.py"},
"advanced-timestamp": {"schema": "timestamp-schema.py",
"ddl": "DDL.sql",
"template": "advanced-timestamp.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-basic-matview": {"schema": "matview-basic-schema.py",
"ddl": "matview-DDL.sql",
"template": "regression-basic-matview.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced-index": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
"advanced-compoundex": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced-strings": {"schema": "strings-schema.py",
"ddl": "strings-DDL.sql",
"template": "advanced-strings.sql",
"normalizer": "normalizer.py"},
"advanced-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "advanced-ints.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-advanced-ints-cntonly": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "regression-advanced-ints-cntonly.sql",
"normalizer": "not-a-normalizer.py"},
# ADVANCED MATERIALIZED VIEW TESTING, INCLUDING COMPLEX GROUP BY AND AGGREGATIONS.
"advanced-matview-nonjoin": {"schema": "matview-advanced-nonjoin-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-nonjoin.sql",
"normalizer": "normalizer.py"},
"advanced-matview-join": {"schema": "matview-advanced-join-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-join.sql",
"normalizer": "normalizer.py"},
# To test index count
"index-count1": {"schema": "index-count1-schema.py",
"ddl": "DDL.sql",
"template": "index-count1.sql",
"normalizer": "normalizer.py"},
# To test index scan: forward scan, reverse scan
"index-scan": {"schema": "index-scan-schema.py",
"ddl": "index-DDL.sql",
"template": "index-scan.sql",
"normalizer": "normalizer.py"},
# This suite written to test push-down of aggregates and limits in combination
# with indexes, projections and order-by.
"pushdown": {"schema": "pushdown-schema.py",
"ddl": "DDL.sql",
"template": "pushdown.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION
# AND VoltDB gives VOLTDB ERROR: Type DECIMAL can't be cast as FLOAT, so keep it disabled, for now.
# If the only problem were HSQL HAS BAD DEFAULT PRECISION, we could USE FUZZY MATCHING.
# Enable this test to investigate the "DECIMAL can't be cast as FLOAT" issue
# "advanced-decimal-fuzzy": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "advanced-decimal.sql",
# "normalizer": "fuzzynormalizer.py"},
# from config.py
"basic-compoundex": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "compound.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, DISABLING
# also, the generator fails to generate statements for:
# Template "SELECT * FROM _table WHERE (_variable _cmp _value[int64]) _logic (_variable _cmp _variable)" failed to yield SQL statements
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# because there are insufficient columns of the same type to satisfy all the _variables
# given how the generator works.
"basic-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "basic-ints.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "basic-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "basic-decimal.sql",
# "normalizer": "normalizer.py"},
# Floating point rounding differences lead to deltas
# "basic-timestamp": {"schema": "timestamp-schema.py",
# "ddl": "timestamp-DDL.sql",
# "template": "basic-timestamp.sql",
# "normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE
# also, the generator fails to generate statements for:
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# "basic-matview": {"schema": "matview-schema.py",
# "ddl": "int-DDL.sql",
# "template": "basic-matview.sql",
# "normalizer": "normalizer.py"},
"basic-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
"basic-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
# TODO: Need to scale down precision of values to keep HSQL happy even after math
"numeric-decimals": {"schema": "decimal-schema.py",
"ddl": "DDL.sql",
"template": "numeric-decimals.sql",
"normalizer": "fuzzynormalizer.py"},
"numeric-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "numeric-ints.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "advanced-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "advanced-decimal.sql",
# "normalizer": "normalizer.py"},
"advanced-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-subq-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-subq-joins.sql",
"normalizer": "normalizer.py"},
"advanced-subq-part-joins": {"schema": "schema.py",
"ddl": "subq-part-DDL.sql",
"template": "advanced-subq-part-joins.sql",
"normalizer": "normalizer.py"},
"advanced-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-matview-subq-nonjoin": {"schema": "matview-advanced-nonjoin-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-subq-nonjoin.sql",
"normalizer": "normalizer.py"},
"insert-into-select": {"schema": "insert-into-select-schema.py",
"ddl": "insert-into-select-DDL.sql",
"template": "insert-into-select.sql",
"normalizer": "normalizer.py"},
}
|
agpl-3.0
|
altova/sec-edgar-tools
|
sec_filing_to_xlsx.py
|
1
|
11948
|
# Copyright 2015 Altova GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__copyright__ = "Copyright 2015-2017 Altova GmbH"
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
# This script generates Excel reports from a SEC EDGAR filing.
# NOTE: You must first download the source code of the 3rd party Python module xlsxwriter from https://pypi.python.org/pypi/XlsxWriter
# and extract the xlsxwriter folder in the archive to the lib/python3.4 subfolder of the RaptorXML server installation directory.
#
# Example invocation:
# raptorxmlxbrl valxbrl --script=sec_filing_to_xlsx.py nanonull.xbrl
import os, datetime, itertools
from altova import *
try:
import xlsxwriter
except ImportError:
raise ImportError('Please install the 3rd party python module xlsxwriter from https://pypi.python.org/pypi/XlsxWriter')
lang='en-US'
formats = {}
def isPeriodStart(role):
return role in (
'http://www.xbrl.org/2003/role/periodStartLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel'
)
def isPeriodEnd(role):
return role in (
'http://www.xbrl.org/2003/role/periodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel'
)
def isTotal(role):
return role in (
'http://www.xbrl.org/2003/role/totalLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def isNegated(role):
return role in (
'http://xbrl.us/us-gaap/role/label/negated',
'http://www.xbrl.org/2009/role/negatedLabel',
'http://www.xbrl.org/2009/role/negatedNetLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel',
'http://www.xbrl.org/2009/role/negatedTerseLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def domainMembersFromPresentationTreeRecursive(network,parent,domain_members):
for rel in network.relationships_from(parent):
domain_members.append(rel.target)
domainMembersFromPresentationTreeRecursive(network,rel.target,domain_members)
def conceptsFromPresentationTreeRecursive(network,parent,concepts):
for rel in network.relationships_from(parent):
if not rel.target.abstract:
concepts.append((rel.target,rel.preferred_label))
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
def analyzePresentationTree(network,roots):
concepts = []
dimensions = {}
for rel in network.relationships_from(roots[0]):
if isinstance(rel.target,xbrl.xdt.Hypercube):
for rel2 in network.relationships_from(rel.target):
if isinstance(rel2.target,xbrl.xdt.Dimension):
domainMembersFromPresentationTreeRecursive(network,rel2.target,dimensions.setdefault(rel2.target,[]))
else:
conceptsFromPresentationTreeRecursive(network,rel2.target,concepts)
else:
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
return concepts, dimensions
def calcTableData(instance,role,contexts,concepts,dimensions):
table = {'columns': [], 'height': len(concepts)}
bIsCashFlow = 'cash' in role[1].lower() and 'flow' in role[1].lower()
for context in contexts:
cs = xbrl.ConstraintSet(context)
period = cs[xbrl.Aspect.PERIOD]
dimension_aspects = [value for aspect,value in cs.items() if isinstance(aspect,xbrl.xdt.Dimension)]
bEliminate = False
for val in dimension_aspects:
domain = dimensions.get(val.dimension,None)
if not domain or val.value not in domain:
bEliminate = True
for dim in set(dimensions.keys())-set([value.dimension for value in dimension_aspects]):
if dim.default_member and dim.default_member not in dimensions[dim]:
bEliminate = True
if bEliminate:
continue
bEmpty = True
bHasCash = False
column = {'period': period, 'dimensions': dimension_aspects, 'rows': []}
for concept in concepts:
cs[xbrl.Aspect.CONCEPT] = concept[0]
if isPeriodStart(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.start)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
elif isPeriodEnd(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.end)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
else:
cs[xbrl.Aspect.PERIOD] = period
facts = instance.facts.filter(cs,allow_additional_dimensions=False)
if len(facts):
bEmpty = False
if bIsCashFlow and not bHasCash and concept[0].is_duration():
bHasCash = 'cash' in next(iter(concept[0].labels(label_role=concept[1],lang=lang))).text.lower()
column['rows'].append({'concept': concept, 'facts': facts})
if not bEmpty and (not bIsCashFlow or bHasCash):
table['columns'].append(column)
return table
def formatConcept(concept):
preferredLabel = concept[1] if concept[1] else 'http://www.xbrl.org/2003/role/label'
labels = list(concept[0].labels(label_role=preferredLabel,lang=lang))
if labels:
return labels[0].text
return str(concept[0].qname)
def formatPeriod(period):
if period.period_type == xbrl.PeriodType.INSTANT:
return period.instant.strftime('%d. %B %Y')
elif period.period_type == xbrl.PeriodType.START_END:
return '%s to %s' % (period.start.strftime('%d. %B %Y'), period.end.strftime('%d. %B %Y'))
elif period.period_type == xbrl.PeriodType.FOREVER:
return 'Forever'
return ''
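# For illustration (arbitrary dates): an INSTANT period on 2017-01-17 renders as
# '17. January 2017'; a START_END period renders as '<start> to <end>' in the same format.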
def formatDimensionValue(dimValue):
return formatConcept((dimValue.value,'http://www.xbrl.org/2003/role/terseLabel'))
def formatFact(dts,fact,preferredLabel=None):
if fact.xsi_nil:
return ('#N/A',None)
elif fact.concept.is_numeric():
if fact.concept.is_fraction():
val = fact.effective_fraction_value
else:
val = fact.effective_numeric_value
if isNegated(preferredLabel):
val *= -1
if fact.concept.is_monetary():
if isTotal(preferredLabel):
return (val,formats['monetary_total'])
return (val,formats['monetary'])
return (val,None)
elif fact.concept.is_qname():
concept = dts.resolve_concept(fact.qname_value)
if concept:
for label in concept.labels():
return (label.text,None)
return (str(fact.qname_value),None)
else:
return (fact.normalized_value,None)
def getDuration(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return 0
return (p.end.year - p.start.year) * 12 + p.end.month - p.start.month
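# For illustration: a START_END period from 2016-01-01 to 2016-04-01 gives
# (0 * 12 + 4 - 1) = 3 months, while an INSTANT period always gives 0.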
def getEndDate(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return p.instant
return p.end
def generateTable(workbook, dts, role, table):
columns = sorted(table['columns'],key=lambda x: (-getDuration(x),getEndDate(x)),reverse=True)
worksheet = workbook.add_worksheet(role[1].split(' - ')[0])
worksheet.set_column(0,0,70)
worksheet.set_column(1,1+len(table['columns']),20)
worksheet.write(0,0,role[1].split(' - ')[2],formats['caption'])
col = 1
row_start = 1
for duration, group in itertools.groupby(columns,key=getDuration):
cols = list(group)
if duration > 0:
if len(cols) > 1:
worksheet.merge_range(0,col,0,col+len(cols)-1,'%d Months Ended' % getDuration(cols[0]),formats['center'])
else:
worksheet.write(0,col,'%d Months Ended' % getDuration(cols[0]),formats['center'])
row = 1
else:
row = 0
for column in cols:
worksheet.write(row,col,getEndDate(column)-datetime.timedelta(days=1),formats['date'])
for i, dimValue in enumerate(column['dimensions']):
dimLabel = formatDimensionValue(dimValue)
if '[Domain]' not in dimLabel:
worksheet.write(row+1+i,col,dimLabel)
col += 1
row_start = max(row_start,row+2+len(column['dimensions']))
for row in range(table['height']):
concept = columns[0]['rows'][row]['concept']
worksheet.write(row_start+row,0,formatConcept(concept),formats['header'])
for col, column in enumerate(columns):
for fact in column['rows'][row]['facts']:
worksheet.write(row_start+row,1+col,*formatFact(dts,fact,concept[1]))
footnotes = [footnote.text for footnote in fact.footnotes(lang=lang)]
if footnotes:
worksheet.write_comment(row_start+row,1+col,'\n'.join(footnotes),{'x_scale':5,'y_scale':2})
def generateTables(path, dts, instance):
global formats
workbook = xlsxwriter.Workbook(path)
formats['center'] = workbook.add_format({'align':'center'})
formats['caption'] = workbook.add_format({'text_wrap':True,'bold':True})
formats['header'] = workbook.add_format({'text_wrap':True})
formats['date'] = workbook.add_format({'num_format':'mmm. d, yyyy','bold':True})
formats['monetary'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)'})
formats['monetary_total'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)', 'underline':33})
# Calculate table data
tables = {}
contexts = list(instance.contexts)
roles = [(role, dts.role_type(role).definition.value) for role in dts.presentation_link_roles()]
roles = sorted(roles, key=lambda role: role[1].split(' - ')[0])
for role in roles:
presentation_network = dts.presentation_base_set(role[0]).network_of_relationships()
roots = list(presentation_network.roots)
tables[role] = calcTableData(instance,role,contexts,*analyzePresentationTree(presentation_network,roots))
# Generate excel sheet for each non-empty table
for role in roles:
if tables[role]['columns']:
generateTable(workbook, dts, role, tables[role])
workbook.close()
# Main entry point, will be called by RaptorXML after the XBRL instance validation job has finished
def on_xbrl_finished(job, instance):
# instance object will be None if XBRL 2.1 validation was not successful
if instance:
path = os.path.join(job.output_dir,'table.xlsx')
generateTables(path, instance.dts, instance)
# Register new output file with RaptorXML engine
job.append_output_filename(path)
|
apache-2.0
|
Kyly/mustaske
|
test/selenium_src/leave_room.py
|
1
|
3018
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class LeaveRoom(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:3000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_leave_room(self):
driver = self.driver
driver.get(self.base_url)
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys("Test_Room")
driver.find_element_by_id("make-room").click()
ownerRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual("Test_Room",ownerRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
ownerRoomID = driver.find_element_by_class_name("drop-down-room-id").text
driver.execute_script("$(window.open('"+self.base_url+"'))")
driver.switch_to_window(driver.window_handles[-1])
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys(ownerRoomID)
driver.find_element_by_id("join-room").click()
audienceRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual(ownerRoomName,audienceRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
audienceRoomID = driver.find_element_by_class_name("drop-down-room-id").text
self.assertEqual(ownerRoomID,audienceRoomID)
driver.find_element_by_xpath("//li/ul/li[4]/a/span").click()
try: self.assertTrue(self.is_element_present(By.ID, "join-create-room"))
except AssertionError as e: self.verificationErrors.append(str(e))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
mit
|
tsdmgz/ansible
|
lib/ansible/modules/cloud/amazon/ecs_ecr.py
|
7
|
11243
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_ecr
version_added: "2.3"
short_description: Manage Elastic Container Registry repositories
description:
- Manage Elastic Container Registry repositories
requirements: [ boto3 ]
options:
name:
description:
- the name of the repository
required: true
registry_id:
description:
- AWS account id associated with the registry.
- If not specified, the default registry is assumed.
required: false
policy:
description:
- JSON or dict that represents the new policy
required: false
force_set_policy:
description:
- if no, prevents setting a policy that would prevent you from
setting another policy in the future.
required: false
default: false
delete_policy:
description:
- if yes, remove the policy from the repository
required: false
default: false
state:
description:
- create or destroy the repository
required: false
choices: [present, absent]
default: 'present'
author:
- David M. Lee (@leedm777)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# If the repository does not exist, it is created. If it does exist, this would not
# affect any policies already on it.
- name: ecr-repo
ecs_ecr: name=super/cool
- name: destroy-ecr-repo
ecs_ecr: name=old/busted state=absent
- name: Cross account ecr-repo
ecs_ecr: registry_id=999999999999 name=cross/account
- name: set-policy as object
ecs_ecr:
name: needs-policy-object
policy:
Version: '2008-10-17'
Statement:
- Sid: read-only
Effect: Allow
Principal:
AWS: '{{ read_only_arn }}'
Action:
- ecr:GetDownloadUrlForLayer
- ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability
- name: set-policy as string
ecs_ecr:
name: needs-policy-string
policy: "{{ lookup('template', 'policy.json.j2') }}"
- name: delete-policy
ecs_ecr:
name: needs-no-policy
delete_policy: yes
'''
RETURN = '''
state:
type: string
description: The asserted state of the repository (present, absent)
returned: always
created:
type: boolean
description: If true, the repository was created
returned: always
name:
type: string
description: The name of the repository
returned: "when state == 'absent'"
repository:
type: dict
description: The created or updated repository
returned: "when state == 'present'"
sample:
createdAt: '2017-01-17T08:41:32-06:00'
registryId: '999999999999'
repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090
repositoryName: ecr-test-1484664090
repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
'''
import json
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
pass # Taken care of by ec2.HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, boto_exception, ec2_argument_spec,
get_aws_connection_info, sort_json_policy_dict)
def build_kwargs(registry_id):
"""
Builds a kwargs dict which may contain the optional registryId.
:param registry_id: Optional string containing the registryId.
:return: kwargs dict with registryId, if given
"""
if not registry_id:
return dict()
else:
return dict(registryId=registry_id)
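# For reference: build_kwargs(None) -> {} and
# build_kwargs('999999999999') -> {'registryId': '999999999999'}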
class EcsEcr:
def __init__(self, module):
region, ec2_url, aws_connect_kwargs = \
get_aws_connection_info(module, boto3=True)
self.ecr = boto3_conn(module, conn_type='client',
resource='ecr', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
self.check_mode = module.check_mode
self.changed = False
self.skipped = False
def get_repository(self, registry_id, name):
try:
res = self.ecr.describe_repositories(
repositoryNames=[name], **build_kwargs(registry_id))
repos = res.get('repositories')
return repos and repos[0]
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryNotFoundException':
return None
raise
def get_repository_policy(self, registry_id, name):
try:
res = self.ecr.get_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
text = res.get('policyText')
return text and json.loads(text)
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryPolicyNotFoundException':
return None
raise
def create_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.create_repository(
repositoryName=name, **build_kwargs(registry_id)).get(
'repository')
self.changed = True
return repo
else:
self.skipped = True
return dict(repositoryName=name)
def set_repository_policy(self, registry_id, name, policy_text, force):
if not self.check_mode:
policy = self.ecr.set_repository_policy(
repositoryName=name,
policyText=policy_text,
force=force,
**build_kwargs(registry_id))
self.changed = True
return policy
else:
self.skipped = True
if self.get_repository(registry_id, name) is None:
printable = name
if registry_id:
printable = '{}:{}'.format(registry_id, name)
raise Exception(
'could not find repository {}'.format(printable))
return
def delete_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.delete_repository(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return repo
else:
repo = self.get_repository(registry_id, name)
if repo:
self.skipped = True
return repo
return None
def delete_repository_policy(self, registry_id, name):
if not self.check_mode:
policy = self.ecr.delete_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return policy
else:
policy = self.get_repository_policy(registry_id, name)
if policy:
self.skipped = True
return policy
return None
def run(ecr, params, verbosity):
# type: (EcsEcr, dict, int) -> Tuple[bool, dict]
result = {}
try:
name = params['name']
state = params['state']
policy_text = params['policy']
delete_policy = params['delete_policy']
registry_id = params['registry_id']
force_set_policy = params['force_set_policy']
# If a policy was given, parse it
policy = policy_text and json.loads(policy_text)
result['state'] = state
result['created'] = False
repo = ecr.get_repository(registry_id, name)
if state == 'present':
result['created'] = False
if not repo:
repo = ecr.create_repository(registry_id, name)
result['changed'] = True
result['created'] = True
result['repository'] = repo
if delete_policy:
original_policy = ecr.get_repository_policy(registry_id, name)
if verbosity >= 2:
result['policy'] = None
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy:
ecr.delete_repository_policy(registry_id, name)
result['changed'] = True
elif policy_text is not None:
try:
policy = sort_json_policy_dict(policy)
if verbosity >= 2:
result['policy'] = policy
original_policy = ecr.get_repository_policy(
registry_id, name)
if original_policy:
original_policy = sort_json_policy_dict(original_policy)
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy != policy:
ecr.set_repository_policy(
registry_id, name, policy_text, force_set_policy)
result['changed'] = True
except:
# Some failure w/ the policy. It's helpful to know what the
# policy is.
result['policy'] = policy_text
raise
elif state == 'absent':
result['name'] = name
if repo:
ecr.delete_repository(registry_id, name)
result['changed'] = True
except Exception as err:
msg = str(err)
if isinstance(err, ClientError):
msg = boto_exception(err)
result['msg'] = msg
result['exception'] = traceback.format_exc()
return False, result
if ecr.skipped:
result['skipped'] = True
if ecr.changed:
result['changed'] = True
return True, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
registry_id=dict(required=False),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
force_set_policy=dict(required=False, type='bool', default=False),
policy=dict(required=False, type='json'),
delete_policy=dict(required=False, type='bool')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['policy', 'delete_policy']])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
ecr = EcsEcr(module)
passed, result = run(ecr, module.params, module._verbosity)
if passed:
module.exit_json(**result)
else:
module.fail_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
kailIII/geraldo
|
site/newsite/django_1_0/django/contrib/sessions/tests.py
|
11
|
5813
|
r"""
>>> from django.conf import settings
>>> from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
>>> from django.contrib.sessions.backends.cache import SessionStore as CacheSession
>>> from django.contrib.sessions.backends.file import SessionStore as FileSession
>>> from django.contrib.sessions.backends.base import SessionBase
>>> db_session = DatabaseSession()
>>> db_session.modified
False
>>> db_session['cat'] = "dog"
>>> db_session.modified
True
>>> db_session.pop('cat')
'dog'
>>> db_session.pop('some key', 'does not exist')
'does not exist'
>>> db_session.save()
>>> db_session.exists(db_session.session_key)
True
>>> db_session.delete(db_session.session_key)
>>> db_session.exists(db_session.session_key)
False
>>> file_session = FileSession()
>>> file_session.modified
False
>>> file_session['cat'] = "dog"
>>> file_session.modified
True
>>> file_session.pop('cat')
'dog'
>>> file_session.pop('some key', 'does not exist')
'does not exist'
>>> file_session.save()
>>> file_session.exists(file_session.session_key)
True
>>> file_session.delete(file_session.session_key)
>>> file_session.exists(file_session.session_key)
False
# Make sure the file backend checks for a good storage dir
>>> settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
>>> FileSession()
Traceback (innermost last):
...
ImproperlyConfigured: The session storage path '/if/this/directory/exists/you/have/a/weird/computer' doesn't exist. Please set your SESSION_FILE_PATH setting to an existing directory in which Django can store session data.
>>> cache_session = CacheSession()
>>> cache_session.modified
False
>>> cache_session['cat'] = "dog"
>>> cache_session.modified
True
>>> cache_session.pop('cat')
'dog'
>>> cache_session.pop('some key', 'does not exist')
'does not exist'
>>> cache_session.save()
>>> cache_session.delete(cache_session.session_key)
>>> cache_session.exists(cache_session.session_key)
False
>>> s = SessionBase()
>>> s._session['some key'] = 'exists' # Pre-populate the session with some data
>>> s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.accessed, s.modified
(False, False)
>>> s.pop('non existent key', 'does not exist')
'does not exist'
>>> s.accessed, s.modified
(True, False)
>>> s.setdefault('foo', 'bar')
'bar'
>>> s.setdefault('foo', 'baz')
'bar'
>>> s.accessed = False # Reset the accessed flag
>>> s.pop('some key')
'exists'
>>> s.accessed, s.modified
(True, True)
>>> s.pop('some key', 'does not exist')
'does not exist'
>>> s.get('update key', None)
# test .update()
>>> s.modified = s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.update({'update key':1})
>>> s.accessed, s.modified
(True, True)
>>> s.get('update key', None)
1
# test .has_key()
>>> s.modified = s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.has_key('update key')
True
>>> s.accessed, s.modified
(True, False)
# test .values()
>>> s = SessionBase()
>>> s.values()
[]
>>> s.accessed
True
>>> s['x'] = 1
>>> s.values()
[1]
# test .iterkeys()
>>> s.accessed = False
>>> i = s.iterkeys()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
['x']
# test .itervalues()
>>> s.accessed = False
>>> i = s.itervalues()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
[1]
# test .iteritems()
>>> s.accessed = False
>>> i = s.iteritems()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
[('x', 1)]
#########################
# Custom session expiry #
#########################
>>> from django.conf import settings
>>> from datetime import datetime, timedelta
>>> td10 = timedelta(seconds=10)
# A normal session has a max age equal to settings
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# So does a custom session with an idle expiration time of 0 (but it'll expire
# at browser close)
>>> s.set_expiry(0)
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# Custom session idle expiration time
>>> s.set_expiry(10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Custom session fixed expiry date (timedelta)
>>> s.set_expiry(td10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Custom session fixed expiry date (fixed datetime)
>>> s.set_expiry(datetime.now() + td10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Set back to default session age
>>> s.set_expiry(None)
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# Allow to set back to default session age even if no alternate has been set
>>> s.set_expiry(None)
# We're changing the setting then reverting back to the original setting at the
# end of these tests.
>>> original_expire_at_browser_close = settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Custom session age
>>> s.set_expiry(10)
>>> s.get_expire_at_browser_close()
False
# Custom expire-at-browser-close
>>> s.set_expiry(0)
>>> s.get_expire_at_browser_close()
True
# Default session age
>>> s.set_expiry(None)
>>> s.get_expire_at_browser_close()
False
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Custom session age
>>> s.set_expiry(10)
>>> s.get_expire_at_browser_close()
False
# Custom expire-at-browser-close
>>> s.set_expiry(0)
>>> s.get_expire_at_browser_close()
True
# Default session age
>>> s.set_expiry(None)
>>> s.get_expire_at_browser_close()
True
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = original_expire_at_browser_close
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
|
lgpl-3.0
|
jayvdb/flake8-copyright
|
setup.py
|
1
|
1860
|
# -=- encoding: utf-8 -=-
#
# Copyright (C) 2014 Savoir-faire Linux Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from setuptools import setup
def get_version(fname='flake8_copyright.py'):
with open(fname) as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
def get_long_description():
descr = []
for fname in ('README.rst',):
with open(fname) as f:
descr.append(f.read())
return '\n\n'.join(descr)
setup(
name='flake8-copyright',
version=get_version(),
description='Adds copyright checks to flake8',
long_description=get_long_description(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
author='Virgil Dupras',
author_email='virgil.dupras@savoirfairelinux.com',
url='https://github.com/savoirfairelinux/flake8-copyright',
keywords='pep8 flake8 copyright',
py_modules=['flake8_copyright'],
install_requires=[
'setuptools',
],
entry_points={
'flake8.extension': ['flake8_copyright = flake8_copyright:CopyrightChecker'],
},
)
|
gpl-3.0
|
e-q/scipy
|
scipy/_lib/tests/test__util.py
|
4
|
7974
|
from multiprocessing import Pool
from multiprocessing.pool import Pool as PWL
import os
import math
import numpy as np
from numpy.testing import assert_equal, assert_
import pytest
from pytest import raises as assert_raises, deprecated_call
import scipy
from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
getfullargspec_no_self, FullArgSpec,
rng_integers)
def test__aligned_zeros():
niter = 10
def check(shape, dtype, order, align):
err_msg = repr((shape, dtype, order, align))
x = _aligned_zeros(shape, dtype, order, align=align)
if align is None:
align = np.dtype(dtype).alignment
assert_equal(x.__array_interface__['data'][0] % align, 0)
if hasattr(shape, '__len__'):
assert_equal(x.shape, shape, err_msg)
else:
assert_equal(x.shape, (shape,), err_msg)
assert_equal(x.dtype, dtype)
if order == "C":
assert_(x.flags.c_contiguous, err_msg)
elif order == "F":
if x.size > 0:
# Size-0 arrays get invalid flags on NumPy 1.5
assert_(x.flags.f_contiguous, err_msg)
elif order is None:
assert_(x.flags.c_contiguous, err_msg)
else:
raise ValueError()
# try various alignments
for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
for n in [0, 1, 3, 11]:
for order in ["C", "F", None]:
for dtype in [np.uint8, np.float64]:
for shape in [n, (1, 2, 3, n)]:
for j in range(niter):
check(shape, dtype, order, align)
def test_check_random_state():
# If seed is None, return the RandomState singleton used by np.random.
# If seed is an int, return a new RandomState instance seeded with seed.
# If seed is already a RandomState instance, return it.
# Otherwise raise ValueError.
rsi = check_random_state(1)
assert_equal(type(rsi), np.random.RandomState)
rsi = check_random_state(rsi)
assert_equal(type(rsi), np.random.RandomState)
rsi = check_random_state(None)
assert_equal(type(rsi), np.random.RandomState)
assert_raises(ValueError, check_random_state, 'a')
if hasattr(np.random, 'Generator'):
# np.random.Generator is only available in NumPy >= 1.17
rg = np.random.Generator(np.random.PCG64())
rsi = check_random_state(rg)
assert_equal(type(rsi), np.random.Generator)
def test_getfullargspec_no_self():
p = MapWrapper(1)
argspec = getfullargspec_no_self(p.__init__)
assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [], None, {}))
argspec = getfullargspec_no_self(p.__call__)
assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None, [], None, {}))
class _rv_generic(object):
def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
return None
rv_obj = _rv_generic()
argspec = getfullargspec_no_self(rv_obj._rvs)
assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs', (2, 3), ['size'], {'size': None}, {}))
def test_mapwrapper_serial():
in_arg = np.arange(10.)
out_arg = np.sin(in_arg)
p = MapWrapper(1)
assert_(p._mapfunc is map)
assert_(p.pool is None)
assert_(p._own_pool is False)
out = list(p(np.sin, in_arg))
assert_equal(out, out_arg)
with assert_raises(RuntimeError):
p = MapWrapper(0)
def test_pool():
with Pool(2) as p:
p.map(math.sin, [1,2,3, 4])
def test_mapwrapper_parallel():
in_arg = np.arange(10.)
out_arg = np.sin(in_arg)
with MapWrapper(2) as p:
out = p(np.sin, in_arg)
assert_equal(list(out), out_arg)
assert_(p._own_pool is True)
assert_(isinstance(p.pool, PWL))
assert_(p._mapfunc is not None)
# the context manager should've closed the internal pool
# check that it has by asking it to calculate again.
with assert_raises(Exception) as excinfo:
p(np.sin, in_arg)
assert_(excinfo.type is ValueError)
# can also set a PoolWrapper up with a map-like callable instance
try:
p = Pool(2)
q = MapWrapper(p.map)
assert_(q._own_pool is False)
q.close()
# closing the PoolWrapper shouldn't close the internal pool
# because it didn't create it
out = p.map(np.sin, in_arg)
assert_equal(list(out), out_arg)
finally:
p.close()
# get our custom ones and a few from the "import *" cases
@pytest.mark.parametrize(
'key', ('ifft', 'diag', 'arccos', 'randn', 'rand', 'array'))
def test_numpy_deprecation(key):
"""Test that 'from numpy import *' functions are deprecated."""
if key in ('ifft', 'diag', 'arccos'):
arg = [1.0, 0.]
elif key == 'finfo':
arg = float
else:
arg = 2
func = getattr(scipy, key)
match = r'scipy\.%s is deprecated.*2\.0\.0' % key
with deprecated_call(match=match) as dep:
func(arg) # deprecated
# in case we catch more than one dep warning
fnames = [os.path.splitext(d.filename)[0] for d in dep.list]
basenames = [os.path.basename(fname) for fname in fnames]
assert 'test__util' in basenames
if key in ('rand', 'randn'):
root = np.random
elif key == 'ifft':
root = np.fft
else:
root = np
func_np = getattr(root, key)
func_np(arg) # not deprecated
assert func_np is not func
# classes should remain classes
if isinstance(func_np, type):
assert isinstance(func, type)
def test_numpy_deprecation_functionality():
# Check that the deprecation wrappers don't break basic NumPy
# functionality
with deprecated_call():
x = scipy.array([1, 2, 3], dtype=scipy.float64)
assert x.dtype == scipy.float64
assert x.dtype == np.float64
x = scipy.finfo(scipy.float32)
assert x.eps == np.finfo(np.float32).eps
assert scipy.float64 == np.float64
assert issubclass(np.float64, scipy.float64)
def test_rng_integers():
rng = np.random.RandomState()
# test that numbers are inclusive of high point
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
assert np.max(arr) == 5
assert np.min(arr) == 2
assert arr.shape == (100, )
# test that numbers are inclusive of high point
arr = rng_integers(rng, low=5, size=100, endpoint=True)
assert np.max(arr) == 5
assert np.min(arr) == 0
assert arr.shape == (100, )
# test that numbers are exclusive of high point
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
assert np.max(arr) == 4
assert np.min(arr) == 2
assert arr.shape == (100, )
# test that numbers are exclusive of high point
arr = rng_integers(rng, low=5, size=100, endpoint=False)
assert np.max(arr) == 4
assert np.min(arr) == 0
assert arr.shape == (100, )
# now try with np.random.Generator
try:
rng = np.random.default_rng()
except AttributeError:
return
# test that numbers are inclusive of high point
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
assert np.max(arr) == 5
assert np.min(arr) == 2
assert arr.shape == (100, )
# test that numbers are inclusive of high point
arr = rng_integers(rng, low=5, size=100, endpoint=True)
assert np.max(arr) == 5
assert np.min(arr) == 0
assert arr.shape == (100, )
# test that numbers are exclusive of high point
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
assert np.max(arr) == 4
assert np.min(arr) == 2
assert arr.shape == (100, )
# test that numbers are exclusive of high point
arr = rng_integers(rng, low=5, size=100, endpoint=False)
assert np.max(arr) == 4
assert np.min(arr) == 0
assert arr.shape == (100, )
|
bsd-3-clause
|
berkmancenter/mediacloud
|
apps/common/tests/python/mediawords/languages/test_ro.py
|
1
|
5631
|
from unittest import TestCase
from mediawords.languages.ro import RomanianLanguage
# noinspection SpellCheckingInspection
class TestRomanianLanguage(TestCase):
def setUp(self):
self.__tokenizer = RomanianLanguage()
def test_language_code(self):
assert self.__tokenizer.language_code() == "ro"
def test_sample_sentence(self):
assert len(self.__tokenizer.sample_sentence())
def test_stop_words_map(self):
stop_words = self.__tokenizer.stop_words_map()
assert "acesta" in stop_words
assert "not_a_stopword" not in stop_words
def test_stem(self):
input_words = ["apropierea", "Splaiului"]
expected_stems = ["apropier", "splai"]
actual_stems = self.__tokenizer.stem_words(input_words)
assert expected_stems == actual_stems
def test_split_text_to_sentences(self):
input_text = """
În prezent, din întreg ansamblul mănăstirii s-a mai păstrat doar biserica și o clopotniță. Acestea se află
amplasate pe strada Sapienței din sectorul 5 al municipiului București, în spatele unor blocuri construite
în timpul regimului comunist, din apropierea Splaiului Independenței și a parcului Izvor. În 1813 Mănăstirea
Mihai-Vodă „era printre mănăstirile mari ale țării”.
"""
expected_sentences = [
'În prezent, din întreg ansamblul mănăstirii s-a mai păstrat doar biserica și o clopotniță.',
(
'Acestea se află amplasate pe strada Sapienței din sectorul 5 al municipiului București, în spatele '
'unor blocuri construite în timpul regimului comunist, din apropierea Splaiului Independenței și a '
'parcului Izvor.'
),
'În 1813 Mănăstirea Mihai-Vodă „era printre mănăstirile mari ale țării”.',
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_text_to_sentences_names(self):
"""Names ("Sf. Mc. Trifon" and others)."""
input_text = """
În prezent în interiorul bisericii există o raclă în care sunt păstrate moștele următorilor Sfinți: Sf. Ioan
Iacob Hozevitul, Sf. Xenia Petrovna, Sf. Teofil, Sf. Mc. Sevastiana, Sf. Mc. Ciprian, Sf. Mc. Iustina, Sf.
Mc. Clement, Sf. Mc. Trifon, Cuv. Auxenție, Sf. Dionisie Zakynthos, Sf. Mc. Anastasie, Sf. Mc. Panaghiotis,
Sf. Spiridon, Sf. Nifon II, Sf. Ignatie Zagorski, Sf. Prooroc Ioan Botezătorul, Cuv. Sava cel Sfințit, Sf.
Mc. Eustatie, Sf. Mc. Theodor Stratilat, Cuv. Paisie, Cuv. Stelian Paflagonul, Sf. Mc. Mercurie, Sf. Mc.
Arhidiacon Ștefan, Sf. Apostol Andrei, Sf. Mc. Dimitrie, Sf. Mc. Haralambie.
"""
expected_sentences = [
(
'În prezent în interiorul bisericii există o raclă în care sunt păstrate moștele următorilor Sfinți: '
'Sf. Ioan Iacob Hozevitul, Sf. Xenia Petrovna, Sf. Teofil, Sf. Mc. Sevastiana, Sf. Mc. Ciprian, Sf. '
'Mc. Iustina, Sf. Mc. Clement, Sf. Mc. Trifon, Cuv. Auxenție, Sf. Dionisie Zakynthos, Sf. Mc. '
'Anastasie, Sf. Mc. Panaghiotis, Sf. Spiridon, Sf. Nifon II, Sf. Ignatie Zagorski, Sf. Prooroc Ioan '
'Botezătorul, Cuv. Sava cel Sfințit, Sf. Mc. Eustatie, Sf. Mc. Theodor Stratilat, Cuv. Paisie, Cuv. '
'Stelian Paflagonul, Sf. Mc. Mercurie, Sf. Mc. Arhidiacon Ștefan, Sf. Apostol Andrei, Sf. Mc. '
'Dimitrie, Sf. Mc. Haralambie.'
),
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_text_to_sentences_abbreviation(self):
"""Abbreviation ("nr.4")."""
input_text = """
Translatarea în pantă a bisericii, pe o distanță de 289 m și coborâtă pe verticală cu 6,2 m, a avut loc în
anul 1985. Operațiune în sine de translatare a edificiului, de pe Dealul Mihai Vodă, fosta stradă a
Arhivelor nr.2 și până în locul în care se află și astăzi, Strada Sapienței nr.4, în apropierea malului
Dâmboviței, a fost considerată la vremea respectivă o performanță deosebită.
"""
expected_sentences = [
(
'Translatarea în pantă a bisericii, pe o distanță de 289 m și coborâtă pe verticală cu 6,2 m, a avut '
'loc în anul 1985.'
),
(
'Operațiune în sine de translatare a edificiului, de pe Dealul Mihai Vodă, fosta stradă a Arhivelor '
'nr.2 și până în locul în care se află și astăzi, Strada Sapienței nr.4, în apropierea malului '
'Dâmboviței, a fost considerată la vremea respectivă o performanță deosebită.'
),
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_sentence_to_words(self):
input_sentence = 'În 1813 Mănăstirea Mihai-Vodă „era printre mănăstirile mari ale țării”.'
expected_words = [
'în', '1813', 'mănăstirea', 'mihai-vodă', 'era', 'printre', 'mănăstirile', 'mari', 'ale', 'țării',
]
actual_words = self.__tokenizer.split_sentence_to_words(input_sentence)
assert expected_words == actual_words
|
agpl-3.0
|
TOC-Shard/moul-scripts
|
Python/system/encodings/cp1251.py
|
593
|
13617
|
""" Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1251',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u20ac' # 0x88 -> EURO SIGN
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
u'\u2116' # 0xB9 -> NUMERO SIGN
u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
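### Usage sketch (editor's addition, not part of the gencodec.py output)
# Once this module is available in the ``encodings`` package the codec is found by
# name, so a round trip is simply (Python 2 era, matching the module itself):
#
#     u'\u0410\u0411\u0412'.encode('cp1251')   # -> '\xc0\xc1\xc2'
#     '\xc0\xc1\xc2'.decode('cp1251')          # -> u'\u0410\u0411\u0412'
#
# The same tables can also be used directly via codecs.charmap_encode/charmap_decode.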
|
gpl-3.0
|
mohitsethi/solum
|
solum/api/controllers/v1/extension.py
|
1
|
2846
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from solum.api.controllers import common_types
from solum.api.controllers.v1 import types as api_types
from solum.openstack.common.gettextutils import _
class Extension(api_types.Base):
"""The Extension resource represents changes that the Provider has added
onto a Platform in addition to the ones supplied by Solum by default.
This may include additional protocol semantics, resource types,
application lifecycle states, resource attributes, etc. Anything may be
added, as long as it does not contradict the base functionality offered
by Solum.
"""
version = wtypes.text
"Version of the extension."
documentation = common_types.Uri
"Documentation URI to the extension."
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/extensions/mysql',
name='mysql',
type='extension',
tags=['large'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='A mysql extension',
version='2.13',
documentation='http://example.com/docs/ext/mysql')
class ExtensionController(rest.RestController):
"""Manages operations on a single extension."""
def __init__(self, extension_id):
pecan.request.context['extension_id'] = extension_id
self._id = extension_id
@wsme_pecan.wsexpose(Extension, wtypes.text)
def get(self):
"""Return this extension."""
error = _("Not implemented")
pecan.response.translatable_error = error
raise wsme.exc.ClientSideError(six.text_type(error))
class ExtensionsController(rest.RestController):
"""Manages operations on the extensions collection."""
@pecan.expose()
def _lookup(self, extension_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ExtensionController(extension_id), remainder
@wsme_pecan.wsexpose([Extension])
def get_all(self):
"""Return all extensions, based on the query provided."""
return []
|
apache-2.0
|
sebastienhupin/qxrad
|
qooxdoo/component/standalone/website/generate.py
|
6
|
3285
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2008 - 2012 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# This is a stub proxy for the real generator.py
##
import sys, os, re, subprocess, codecs
CMD_PYTHON = sys.executable
QOOXDOO_PATH = '../../..'
# this is from misc.json, duplicated for decoupling
_eolComment = re.compile(r'(?<![a-zA-Z]:)//.*$', re.M) # double $ for string.Template
_mulComment = re.compile(r'/\*.*?\*/', re.S)
def stripComments(s):
b = _eolComment.sub('',s)
b = _mulComment.sub('',b)
return b
def getQxPath():
path = QOOXDOO_PATH
# OS env takes precedence
if os.environ.has_key("QOOXDOO_PATH"):
path = os.environ["QOOXDOO_PATH"]
# else use QOOXDOO_PATH from config.json
elif os.path.exists('config.json'):
# try json parsing with qx json
if not path.startswith('${'): # template macro has been resolved
sys.path.insert(0, path)
try:
from misc import json
got_json = True
except:
got_json = False
got_path = False
if got_json:
config_str = codecs.open('config.json', "r", "utf-8").read()
config_str = stripComments(config_str)
config = json.loads(config_str)
p = config.get("let")
if p:
p = p.get("QOOXDOO_PATH")
if p:
path = p
got_path = True
# regex parsing - error prone
if not got_path:
qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?')
conffile = open('config.json')
aconffile = conffile.readlines()
for line in aconffile:
mo = qpathr.search(line)
if mo:
path = mo.group(1)
break # assume first occurrence is ok
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path))
return path
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir
qxpath = getQxPath()
REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py')
if not os.path.exists(REAL_GENERATOR):
print "Cannot find real generator script under: \"%s\"; aborting" % REAL_GENERATOR
sys.exit(1)
argList = []
argList.append(CMD_PYTHON)
argList.append(REAL_GENERATOR)
argList.extend(sys.argv[1:])
if sys.platform == "win32":
argList1=[]
for arg in argList:
if arg.find(' ')>-1:
argList1.append('"%s"' % arg)
else:
argList1.append(arg)
argList = argList1
else:
argList = ['"%s"' % x for x in argList] # quote argv elements
cmd = " ".join(argList)
retval = subprocess.call(cmd, shell=True)
sys.exit(retval)
|
lgpl-3.0
|
anash28/linux-agent
|
dattolib/__init__.py
|
2
|
1246
|
import sys
import os
import socket
import struct
from request_pb2 import Request
from reply_pb2 import Reply
IPC_SOCKET_PATH = "/var/run/dattod.sock"
class DattodConnectionError(Exception):
pass
# Send a Request to dattod over the unix socket IPC_SOCKET_PATH
def make_request_to_dattod(request):
# check if socket path exists, bail if not
if not os.path.exists(IPC_SOCKET_PATH):
print 'Abort: Socket does not exist!'
raise Exception("Socket does not exist")
request_serialized = request.SerializeToString()
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(IPC_SOCKET_PATH)
s.send(struct.pack('!I', len(request_serialized)))
s.send(request_serialized)
# receive 4 bytes containing the size of the Reply
length = s.recv(4)
length = struct.unpack('!I', length)[0]
# Create the reply
reply = Reply()
# Get the data from the network
data = s.recv(length)
# Interpret the data as a Reply object
reply.ParseFromString(data)
return reply
except socket.error as e:
raise DattodConnectionError("Unable to connect to dattod: " + str(e))
finally:
s.close()
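# --- Illustrative usage sketch (editor's addition). The fields of the protobuf
# Request message are not defined in this module, so the populated message below
# is purely hypothetical: ---
#
#     req = Request()
#     # ... set whatever fields the dattod protocol expects on `req` ...
#     reply = make_request_to_dattod(req)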
|
gpl-2.0
|
amacd31/bom_data_parser
|
tests/test_hrs.py
|
1
|
2066
|
import os
import numpy as np
import pandas as pd
import unittest
from datetime import datetime
from bom_data_parser import read_hrs_csv
class HRSTest(unittest.TestCase):
def setUp(self):
self.test_cdo_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts.csv')
def test_hrs(self):
data, attributes = read_hrs_csv(self.test_cdo_file)
self.assertTrue('Q' in data.columns)
self.assertTrue('QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2012,10,4))
self.assertAlmostEqual(data.Q.values[0], 127.312,3)
self.assertAlmostEqual(data.Q.values[-1], 186.238,3)
self.assertEqual(data.QCode.values[0], 10)
self.assertEqual(data.QCode.values[-1], 10)
def test_hrs_201510_format(self):
test_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts_201510.csv')
data, attributes = read_hrs_csv(test_file)
self.assertTrue('Flow (ML)' in data.columns)
self.assertTrue('Bureau QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2014,12,31))
self.assertAlmostEqual(data['Flow (ML)'].values[0], 127.322,3)
self.assertAlmostEqual(data['Flow (ML)'].values[-1], 16.1915,4)
self.assertEqual(data['Bureau QCode'].values[0], 'A')
self.assertEqual(data['Bureau QCode'].values[-1], 'A')
|
bsd-3-clause
|
Microsoft/hummingbird
|
hummingbird/ml/exceptions.py
|
1
|
1451
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Common errors.
"""
_missing_converter = """
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter implemented.
Please file an issue at https://github.com/microsoft/hummingbird.
"""
_missing_backend = """
It usually means the backend is not currently supported.
Please check the spelling or file an issue at https://github.com/microsoft/hummingbird.
"""
_constant_error = """
It usually means a constant is not available or you are trying to override a constant value.
"""
class MissingConverter(RuntimeError):
"""
Raised when there is no registered converter for a machine learning operator.
"""
def __init__(self, msg):
super().__init__(msg + _missing_converter)
class MissingBackend(RuntimeError):
"""
Raised when the selected backend is not supported.
"""
def __init__(self, msg):
super().__init__(msg + _missing_backend)
class ConstantError(TypeError):
"""
    Raised when a constant is not available or it gets overwritten.
"""
def __init__(self, msg):
super().__init__(msg + _constant_error)
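# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Each exception only needs the operator- or backend-specific part of the message;
# the generic guidance text defined above is appended automatically in __init__.
def _example_converter_lookup(operator_name, registry):
    """Hypothetical helper showing how MissingConverter is intended to be raised."""
    try:
        return registry[operator_name]
    except KeyError:
        raise MissingConverter("Unable to find converter for operator '%s'." % operator_name)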
|
mit
|
javier3407/Plugin.Video.JavierTV
|
resources/tools/seriesyonkis.py
|
4
|
20563
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# SeriesYonkis parser for PalcoTV
# Version 0.1 (22.04.2015)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the pelisalacarta libraries by Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import re,urllib,urllib2,sys
import plugintools
from resources.tools.resolvers import *
thumbnail = 'http://oi58.tinypic.com/1jwwo6.jpg'
fanart = 'http://st-listas.20minutos.es/images/2012-06/335200/list_640px.jpg?1368294762'
referer = 'http://www.seriesflv.com/'
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
def seriesyonkis(params):
plugintools.log('[%s %s] SeriesYonkis %s' % (addonName, addonVersion, repr(params)))
url = 'http://www.seriesyonkis.sx/lista-de-series'
referer = 'http://www.seriesyonkis.sx/'
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
data = gethttp_referer_headers(url, referer, show)
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
#plugintools.log("data= "+data)
match_series = plugintools.find_single_match(data, '<div class="covers-box">(.*?)</ul>')
#plugintools.log("listado= "+match_series)
plugintools.add_item(action="", title = "[COLOR orange][B]Lista de series[/B][/COLOR]", url = url, thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
letra_activa = plugintools.find_single_match(match_series, '<li class="active">(.*?)</li>')
url = plugintools.find_single_match(letra_activa, '<a href="([^"]+)')
plugintools.log("url= "+url)
title = url.replace("/lista-de-series/", "")
plugintools.add_item(action="", title = title, url = url, thumbnail = thumbnail , fanart = fanart , page = show, folder = True, isPlayable = False)
letras = plugintools.find_multiple_matches(match_series, '<li>(.*?)</a></li>')
for entry in letras:
url = plugintools.find_single_match(entry, '<a href="([^"]+)')
plugintools.log("url= "+url)
title = url.replace("/lista-de-series/", "")
plugintools.log("title= "+title)
plugintools.add_item(action="lista_letra", title = title, url = url, thumbnail = thumbnail , fanart = fanart , page = show , folder = True, isPlayable = False)
def lista_letra(params):
plugintools.log('[%s %s] lista_letra %s' % (addonName, addonVersion, repr(params)))
    show = params.get("series_id") # Get the user's view mode for TV series
if show == "":
show = params.get("page")
if show == "":
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
url = params.get("url")
url = 'http://www.seriesyonkis.sx/'+url
referer = 'http://www.seriesyonkis.sx/'
data = gethttp_referer_headers(url, referer, show)
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
#plugintools.log("data= "+data)
match_series = plugintools.find_single_match(data, '<div class="covers-box">(.*?)<div id="sidebar-section">')
plugintools.log("listado= "+match_series)
    # Series-by-letter paginator ("next" button)
paginador_next(data)
    # Series listing
lista_series(match_series)
# Series listing
def lista_series(match_series):
    params = plugintools.get_params()  # note: 'params' was referenced here without being defined; fetch it explicitly
    show = params.get("series_id") # Get the user's view mode for TV series
if show == "":
show = params.get("page")
if show == "":
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
serie = plugintools.find_multiple_matches(match_series, '<li>(.*?)</a></li>')
for entry in serie:
url = plugintools.find_single_match(entry, 'href="([^"]+)')
url = 'http://www.seriesyonkis.sx'+url
plugintools.log("url= "+url)
title_serie = plugintools.find_single_match(entry, 'title="([^"]+)').strip()
plugintools.log("title_serie= "+title_serie)
if title_serie != "":
plugintools.log("url_serie= "+url)
plugintools.add_item(action="serie_capis", title = title_serie, url = url, thumbnail = thumbnail , fanart = fanart , page = show , folder = True, isPlayable = False)
# Series-by-letter paginator
def paginador_next(data):
    params = plugintools.get_params()  # note: 'params' was referenced here without being defined; fetch it explicitly
    show = params.get("series_id") # Get the user's view mode for TV series
if show == "":
show = params.get("page")
if show == "":
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
match_paginas = plugintools.find_single_match(data, 'class="paginator">(.*?)<div id="sidebar-section">')
plugintools.log("match_paginas= "+match_paginas)
pag_actual = plugintools.find_single_match(match_paginas, '<strong>(.*?)</strong>')
plugintools.log("pag_actual = "+str(pag_actual))
num_pags = plugintools.find_multiple_matches(match_paginas, '<a(.*?)</a>')
i = 0
for entry in num_pags:
i = i + 1
plugintools.log("Núm. páginas= "+str(i))
next = int(pag_actual) + 1
plugintools.add_item(action="", title= '[COLOR lightyellow][I]Siguiente (Pág. '+str(next)+')[/I][/COLOR]', url = "", thumbnail = thumbnail , fanart = fanart , page = show , folder = True , isPlayable = False)
def serie_capis(params):
plugintools.log('[%s %s] serie_capis %s' % (addonName, addonVersion, repr(params)))
datamovie={}
if params.get("plot") != "":
datamovie["Plot"]=params.get("plot") # Cargamos sinopsis de la serie... (si existe)
else:
datamovie["Plot"]="."
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
print show
plugintools.modo_vista(show)
url = params.get("url")
referer = 'http://www.seriesyonkis.sx/'
data = gethttp_referer_headers(url,referer,show)
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
#plugintools.log("data= "+data)
    # Cover art
cover = plugintools.find_single_match(data, '<img src="([^"]+)')
match_temporadas = plugintools.find_single_match(data, '<div id="section-content">(.*?)</ul>')
temps = plugintools.find_multiple_matches(match_temporadas, '<h3 class="season"(.*?)</li>')
for entry in temps:
capis = plugintools.find_multiple_matches(entry, '<td class="episode-title">(.*?)</td>')
for entri in capis:
url_cap = plugintools.find_single_match(entri, '<a href="([^"]+)')
url_cap = 'http://www.seriesyonkis.sx'+url_cap
plugintools.log("url_cap= "+url_cap)
num_cap = plugintools.find_single_match(entri, '<strong>(.*?)</strong>')
num_cap = num_cap.strip()
plugintools.log("num_cap= "+num_cap)
title_cap = plugintools.find_single_match(entri, '</strong>(.*?)</a>')
title_cap = title_cap.strip()
plugintools.log("title_cap= "+title_cap)
title_capi = '[COLOR orange][B]'+num_cap+'[/B][COLOR white]'+title_cap+'[/COLOR]'.strip()
title_fixed = num_cap + title_cap
title_fixed = title_fixed.strip()
plugintools.add_item(action="enlaces_capi", title=title_capi, url = url_cap, thumbnail = cover , plot = datamovie["Plot"], info_labels = datamovie , fanart = fanart, folder = True, page = show, extra = title_fixed , isPlayable = False)
def enlaces_capi(params):
plugintools.log('[%s %s] enlaces_capi %s' % (addonName, addonVersion, repr(params)))
datamovie = {}
datamovie["Plot"] = params.get("plot")
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
url = params.get("url")
title_fixed = params.get("extra")
referer = 'http://www.seriesyonkis.sx/'
data = gethttp_referer_headers(url,referer,show)
plugintools.modo_vista(show)
#plugintools.log("data= "+data)
matches = plugintools.find_single_match(data, '<h2 class="header-subtitle veronline">(.*?)</table>')
match_veronline = plugintools.find_single_match(matches, '<tbody>(.*?)</tbody>')
match_links = plugintools.find_multiple_matches(match_veronline, '<tr>(.*?)</tr>')
for entry in match_links:
#plugintools.log("entry= "+entry)
title_url = plugintools.find_single_match(entry, 'title="([^"]+)')
page_url = plugintools.find_single_match(entry, '<a href="([^"]+)')
server = plugintools.find_single_match(entry, 'watch via([^"]+)')
plugintools.log("server= "+server)
idioma_capi = plugintools.find_single_match(entry, '<span class="flags(.*?)</span></td>')
idioma_capi_fixed = idioma_capi.split(">")
if len(idioma_capi_fixed) >= 2:
idioma_capi = idioma_capi_fixed[1]
plugintools.log("idioma_capi= "+idioma_capi)
if idioma_capi == "English":
idioma_capi = ' [ENG]'
elif idioma_capi == "english":
idioma_capi = ' [ENG]'
elif idioma_capi == "Español":
idioma_capi = ' [ESP]'
elif idioma_capi == "Latino":
idioma_capi = ' [LAT]'
elif idioma_capi.find("English-Spanish SUBS") >= 0:
idioma_capi = ' [VOSE]'
elif idioma_capi.find("Japanese-Spanish SUBS") >= 0:
idioma_capi = ' [VOSE]'
else:
idioma_capi = " [N/D]"
plugintools.log("idioma_capi= "+idioma_capi)
page_url = 'http://www.seriesyonkis.sx/'+page_url
if server.find("tumi") >= 0:
desc = '[Tumi]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie , thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("streamin.to") >= 0:
desc = '[Streamin.to]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("vidspot") >= 0:
desc = '[Vidspot]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("allmyvideos") >= 0:
desc = '[allmyvideos]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("streamcloud") >= 0:
desc = '[Streamcloud]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("nowvideo") >= 0:
desc = '[Nowvideo]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("veehd") >= 0:
desc = '[VeeHD]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
if server.find("allmyvideos") >= 0:
desc = '[Allmyvideos]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie , thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("Novamov") >= 0:
desc = '[Novamov]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("Moevideos") >= 0:
desc = '[Vidspot]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("Gamovideo") >= 0:
desc = '[allmyvideos]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("Movshare") >= 0:
desc = '[Streamcloud]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("played.to") >= 0:
desc = '[Played.to]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("Mail.ru") >= 0:
desc = '[Mail.ru]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("vk") >= 0:
desc = '[Vk]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
elif server.find("videobam") >= 0:
desc = '[Videobam]'
plugintools.add_item(action="getlink", title = title_fixed + ' [COLOR orange][I]'+desc+'[/I][/COLOR] [COLOR lightyellow][I]'+idioma_capi+'[/I][/COLOR]' , url = page_url , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart, page = show , folder = False, isPlayable = True)
def getlink(params):
plugintools.log('[%s %s] getlink %s' % (addonName, addonVersion, repr(params)))
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
page_url = params.get("url")
referer = 'http://www.seriesyonkis.sx/'
data = gethttp_referer_headers(page_url,referer,show)
plugintools.modo_vista(show)
    show = params.get("series_id") # Get the user's view mode for TV series
if show is None:
show = params.get("page")
if show is None:
show = "tvshows"
plugintools.log("show= "+show)
plugintools.modo_vista(show)
match = plugintools.find_single_match(data, '<table class="episodes full-width">(.*?)</table>')
url_final = plugintools.find_single_match(match, '<a href="([^"]+)')
if url_final.find("allmyvideos") >= 0:
params["url"]=url_final
allmyvideos(params)
elif url_final.find("vidspot") >= 0:
params["url"]=url_final
vidspot(params)
if url_final.find("played.to") >= 0:
params["url"]=url_final
playedto(params)
elif url_final.find("streamcloud") >= 0:
params["url"]=url_final
streamcloud(params)
elif url_final.find("nowvideo.sx") >= 0:
params["url"]=url_final
nowvideo(params)
elif url_final.find("streamin.to") >= 0:
params["url"]=url_final
streaminto(params)
elif url_final.find("veehd") >= 0:
params["url"]=url_final
veehd(params)
elif url_final.find("novamov") >= 0:
params["url"]=url_final
novamov(params)
elif url_final.find("gamovideo") >= 0:
params["url"]=url_final
gamovideo(params)
elif url_final.find("moevideos") >= 0:
params["url"]=url_final
moevideos(params)
elif url_final.find("movshare") >= 0:
params["url"]=url_final
movshare(params)
elif url_final.find("vk") >= 0:
params["url"]=url_final
vk(params)
elif url_final.find("tumi") >= 0:
params["url"]=url_final
tumi(params)
elif url_final.find("vk") >= 0:
params["url"]=url_final
vk(params)
elif url_final.find("videobam") >= 0:
params["url"]=url_final
videobam(params)
elif url_final.find("mail.ru") >= 0:
params["url"]=url_final
mailru(params)
plugintools.modo_vista(show)
def gethttp_referer_headers(url,referer,show):
params = plugintools.get_params()
    show_default = params.get("series_id") # Get the user's view mode for TV series
if show_default is None:
plugintools.log("show= "+show)
plugintools.modo_vista(show)
else:
show = show_default
plugintools.log("show= "+show)
plugintools.modo_vista(show)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer", referer])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.modo_vista(show)
return body
|
gpl-2.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/numpy/core/generate_numpy_api.py
|
16
|
7465
|
from __future__ import division, print_function
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
h_template = r"""
#ifdef _MULTIARRAYMODULE
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version %%x but this version of numpy is %%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version %%x but this version of numpy is %%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
global_vars_types = sources[1]
scalar_bool_values = sources[2]
types_api = sources[3]
multiarray_funcs = sources[4]
    # Remove global_vars_types: not an API dict
multiarray_api = sources[:1] + sources[2:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, f.return_type,
f.args, api_name)
for name, index in global_vars.items():
type = global_vars_types[name]
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, index in scalar_bool_values.items():
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, index in types_api.items():
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
raise AssertionError("Multiarray API size mismatch %d %d" %
(len(multiarray_api_dict), len(multiarray_api_index)))
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# write to documentation
fid = open(doc_file, 'w')
fid.write(c_api_header)
for func in numpyapi_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
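# --- Illustrative usage sketch (editor's addition, not part of the original build script) ---
# In a normal NumPy build this module is driven by the setup machinery; invoked by
# hand it would look roughly like this (the output directory below is hypothetical):
#
#     import generate_numpy_api
#     header, c_src, doc = generate_numpy_api.generate_api('build/src/numpy/core')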
|
gpl-3.0
|
diofant/diofant
|
diofant/tests/vector/test_vector.py
|
2
|
6040
|
import pytest
from diofant import Add, Derivative, Function
from diofant import ImmutableMatrix as Matrix
from diofant import (Integral, Mul, Pow, cos, diff, pi, simplify, sin, sqrt,
symbols, trigsimp)
from diofant.abc import a, b, c
from diofant.vector import (BaseVector, CoordSysCartesian, Vector, VectorAdd,
VectorMul, VectorZero)
__all__ = ()
C = CoordSysCartesian('C')
i, j, k = C.base_vectors()
def test_vector_diofant():
"""
    Test whether the Vector framework conforms to the hashing
and equality testing properties of Diofant.
"""
v1 = 3*j
assert v1 == j*3
assert v1.components == {j: 3}
v2 = 3*i + 4*j + 5*k
v3 = 2*i + 4*j + i + 4*k + k
assert v3 == v2
assert v3.__hash__() == v2.__hash__()
def test_vector():
pytest.raises(ValueError, lambda: BaseVector('x', 10, C, ' ', ' '))
pytest.raises(TypeError, lambda: BaseVector('x', 0, a, ' ', ' '))
assert isinstance(i, BaseVector)
assert i != j
assert j != k
assert k != i
assert i - i == Vector.zero
assert i + Vector.zero == i
assert i - Vector.zero == i
assert Vector.zero != 0
assert -Vector.zero == Vector.zero
assert Vector.zero - i == -i
v1 = a*i + b*j + c*k
v2 = a**2*i + b**2*j + c**2*k
v3 = v1 + v2
v4 = 2 * v1
v5 = a * i
assert i + Mul(2, i) == 3*i
assert i + Add(i, j) == 2*i + j
pytest.raises(TypeError, lambda: i + Pow(j, 2))
assert isinstance(v1, VectorAdd)
assert v1 - v1 == Vector.zero
assert v1 + Vector.zero == v1
assert v1.dot(i) == a
assert v1.dot(j) == b
assert v1.dot(k) == c
assert i.dot(v2) == a**2
assert j.dot(v2) == b**2
assert k.dot(v2) == c**2
assert v3.dot(i) == a**2 + a
assert v3.dot(j) == b**2 + b
assert v3.dot(k) == c**2 + c
assert v1 + v2 == v2 + v1
assert v1 - v2 == -1 * (v2 - v1)
assert a * v1 == v1 * a
pytest.raises(ValueError, lambda: (i + j)*(i - j))
pytest.raises(TypeError, lambda: v1/v2)
pytest.raises(ValueError, lambda: v1/0)
assert isinstance(v5, VectorMul)
assert v5.base_vector == i
assert v5.measure_number == a
assert isinstance(v4, Vector)
assert isinstance(v4, VectorAdd)
assert isinstance(v4, Vector)
assert isinstance(Vector.zero, VectorZero)
assert isinstance(Vector.zero, Vector)
assert isinstance(v1 * 0, VectorZero)
assert v1.to_matrix(C) == Matrix([[a], [b], [c]])
assert i.components == {i: 1}
assert v5.components == {i: a}
assert v1.components == {i: a, j: b, k: c}
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(a, v1) == v1*a
assert VectorMul(1, i) == i
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(0, Vector.zero) == Vector.zero
v = (a*b + a*c + b**2 + b*c)*i + j
assert v.factor() == (a + b)*(b + c)*i + j
def test_vector_magnitude_normalize():
assert Vector.zero.magnitude() == 0
assert Vector.zero.normalize() == Vector.zero
assert i.magnitude() == 1
assert j.magnitude() == 1
assert k.magnitude() == 1
assert i.normalize() == i
assert j.normalize() == j
assert k.normalize() == k
v1 = a * i
assert v1.normalize() == (a/sqrt(a**2))*i
assert v1.magnitude() == sqrt(a**2)
v2 = a*i + b*j + c*k
assert v2.magnitude() == sqrt(a**2 + b**2 + c**2)
assert v2.normalize() == v2 / v2.magnitude()
v3 = i + j
assert v3.normalize() == (sqrt(2)/2)*C.i + (sqrt(2)/2)*C.j
def test_vector_simplify():
A, s, k, m = symbols('A, s, k, m')
test1 = (1 / a + 1 / b) * i
assert (test1 & i) != (a + b) / (a * b)
test1 = simplify(test1)
assert (test1 & i) == (a + b) / (a * b)
assert test1.simplify() == simplify(test1)
test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * i
test2 = simplify(test2)
assert (test2 & i) == (A**2 * s**4 / (4 * pi * k * m**3))
test3 = ((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i
test3 = simplify(test3)
assert (test3 & i) == 0
test4 = ((-4 * a * b**2 - 2 * b**3 - 2 * a**2 * b) / (a + b)**2) * i
test4 = simplify(test4)
assert (test4 & i) == -2 * b
v = (sin(a)+cos(a))**2*i - j
assert trigsimp(v) == (2*sin(a + pi/4)**2)*i + (-1)*j
assert trigsimp(v) == v.trigsimp()
assert simplify(Vector.zero) == Vector.zero
def test_vector_dot():
assert i.dot(Vector.zero) == 0
assert Vector.zero.dot(i) == 0
assert i & Vector.zero == 0
assert i.dot(i) == 1
assert i.dot(j) == 0
assert i.dot(k) == 0
assert i & i == 1
assert i & j == 0
assert i & k == 0
assert j.dot(i) == 0
assert j.dot(j) == 1
assert j.dot(k) == 0
assert j & i == 0
assert j & j == 1
assert j & k == 0
assert k.dot(i) == 0
assert k.dot(j) == 0
assert k.dot(k) == 1
assert k & i == 0
assert k & j == 0
assert k & k == 1
def test_vector_cross():
assert i.cross(Vector.zero) == Vector.zero
assert Vector.zero.cross(i) == Vector.zero
assert i.cross(i) == Vector.zero
assert i.cross(j) == k
assert i.cross(k) == -j
assert i ^ i == Vector.zero
assert i ^ j == k
assert i ^ k == -j
assert j.cross(i) == -k
assert j.cross(j) == Vector.zero
assert j.cross(k) == i
assert j ^ i == -k
assert j ^ j == Vector.zero
assert j ^ k == i
assert k.cross(i) == j
assert k.cross(j) == -i
assert k.cross(k) == Vector.zero
assert k ^ i == j
assert k ^ j == -i
assert k ^ k == Vector.zero
def test_vector_diff_integrate():
f = Function('f')
v = f(a)*C.i + a**2*C.j - C.k
pytest.raises(TypeError, lambda: v.diff(v))
assert Derivative(v, a) == Derivative((f(a))*C.i +
a**2*C.j + (-1)*C.k, a)
assert (diff(v, a) == v.diff(a) == Derivative(v, a).doit() ==
(Derivative(f(a), a))*C.i + 2*a*C.j)
assert (Integral(v, a) == (Integral(f(a), a))*C.i +
(Integral(a**2, a))*C.j + (Integral(-1, a))*C.k)
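# Illustrative sketch, not part of the original test module: the same base
# vectors support plain arithmetic outside the assertions above; the numbers
# below are arbitrary examples.
def _example_dot_cross():
    v = 2*i + 3*j
    w = j + 4*k
    assert v.dot(w) == 3                      # 2*0 + 3*1 + 0*4
    assert v.cross(w) == 12*i - 8*j + 2*k     # right-hand rule, component-wise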
|
bsd-3-clause
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/werkzeug/contrib/securecookie.py
|
294
|
12204
|
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
    an alternative mac method. The important thing is that the mac method
is a function with a similar interface to the hashlib. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If not set `None` or not specified
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
    #: the module used for serialization. Unless overridden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
# unfortunately pickle and other serialization modules can
            # cause pretty much every error here. if we get one we catch it
# and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if not b'=' in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
        cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
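# Hedged usage sketch (illustrative, not part of werkzeug): a serialize /
# unserialize round trip plus a tampered value, showing the silent-failure
# behaviour described in the module docstring. The secret is a placeholder.
def _example_roundtrip():
    secret = b'not-a-real-secret'
    cookie = SecureCookie({'user_id': 42}, secret)
    blob = cookie.serialize()
    restored = SecureCookie.unserialize(blob, secret)
    assert restored['user_id'] == 42
    # altering the payload invalidates the MAC and yields an empty cookie
    tampered = SecureCookie.unserialize(blob.replace(b'user_id', b'user_xx'), secret)
    assert 'user_id' not in tampered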
|
gpl-2.0
|
bassijtsma/chatbot
|
yowsup/layers/protocol_groups/protocolentities/notification_groups_add.py
|
61
|
1946
|
from .notification_groups import GroupsNotificationProtocolEntity
from yowsup.structs import ProtocolTreeNode
class AddGroupsNotificationProtocolEntity(GroupsNotificationProtocolEntity):
'''
    <notification participant="{{participant_jid}}" t="{{TIMESTAMP}}" from="{{group_jid}}" type="w:gp2" id="{{id}}" notify="WhatsApp">
<add>
<participant jid="{{JID_1}}">
</participant>
</add>
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, participant, offline, participants):
super(AddGroupsNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, participant, offline)
self.setParticipants(participants)
def setParticipants(self, participants):
assert type(participants) is list, "Must be a list of jids, got %s instead." % type(participants)
self.participants = participants
def getParticipants(self):
return self.participants
def __str__(self):
out = super(AddGroupsNotificationProtocolEntity, self).__str__()
out += "Participants: %s\n" % " ".join(self.getParticipants())
return out
def toProtocolTreeNode(self):
node = super(AddGroupsNotificationProtocolEntity, self).toProtocolTreeNode()
addNode = ProtocolTreeNode("add")
participants = []
for jid in self.getParticipants():
pnode = ProtocolTreeNode("participant", {"jid": jid})
participants.append(pnode)
addNode.addChildren(participants)
node.addChild(addNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
addNode = node.getChild("add")
participants = []
for p in addNode.getAllChildren("participant"):
participants.append(p["jid"])
return AddGroupsNotificationProtocolEntity(
node["id"], node["from"], node["t"], node["notify"], node["participant"], node["offline"],
participants
)
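# Illustrative sketch (not shipped with yowsup): build an add-notification
# entity and check that the generated tree contains the participant JIDs;
# every literal value below is a placeholder.
def _example_build_node():
    entity = AddGroupsNotificationProtocolEntity(
        "id-1", "123-456789@g.us", "1400000000", "WhatsApp",
        "111@s.whatsapp.net", "0", ["222@s.whatsapp.net"]
    )
    node = entity.toProtocolTreeNode()
    add_node = node.getChild("add")
    jids = [p["jid"] for p in add_node.getAllChildren("participant")]
    assert jids == ["222@s.whatsapp.net"]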
|
gpl-3.0
|
BludhavenGrayson/repository.BludhavenGrayson
|
plugin.video.bhg.uktvplay/net.py
|
81
|
10355
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
#Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(30)
class HeadRequest(urllib2.Request):
'''A Request class that sends HEAD requests'''
def get_method(self):
return 'HEAD'
class Net:
'''
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from t0mm0.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 ' + \
'(KHTML, like Gecko) Chrome/13.0.782.99 Safari/535.1'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='',
http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
user_agent (str): String to use as the User Agent header. If not
supplied the class will use a default user agent (chrome)
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
        '''Returns a dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers,
compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
'''
req = HeadRequest(url)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
response = urllib2.urlopen(req)
return HttpResponse(response)
def _fetch(self, url, form_data={}, headers={}, compression=True):
'''
Perform an HTTP GET or POST request.
Args:
url (str): The URL to GET or POST.
form_data (dict): A dictionary of form data to POST. If empty, the
request will be a GET, if it contains form data it will be a POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
encoding = ''
req = urllib2.Request(url)
if form_data:
form_data = urllib.urlencode(form_data)
req = urllib2.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
if compression:
req.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(req)
return HttpResponse(response)
class HttpResponse:
'''
    This class represents a response from an HTTP request.
    The content is examined and every attempt is made to properly decode it to
Unicode.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
'''
content = ''
    '''Unicode encoded string containing the body of the response.'''
def __init__(self, response):
'''
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
'''
self._response = response
html = response.read()
try:
if response.headers['content-encoding'].lower() == 'gzip':
html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
except:
pass
try:
content_type = response.headers['content-type']
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
except:
pass
r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);' +
'\s+charset=(.+?)"', html, re.IGNORECASE)
if r:
encoding = r.group(1)
try:
html = unicode(html, encoding)
except:
pass
self.content = html
def get_headers(self):
'''Returns a List of headers returned by the server.'''
return self._response.info().headers
def get_url(self):
'''
Return the URL of the resource retrieved, commonly used to determine if
a redirect was followed.
'''
return self._response.geturl()
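# Minimal usage sketch (illustrative, not part of the original module): issue a
# GET with a custom header and inspect the response; the URL is an example only.
def _example_get():
    net = Net(user_agent='example-agent/1.0')
    response = net.http_GET('http://example.com', headers={'X-Test': 'testing'})
    print response.get_url()
    print len(response.content)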
|
gpl-2.0
|
pe-suke/ansible
|
lib/ansible/module_utils/cloudstack.py
|
59
|
14689
|
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
class AnsibleCloudStack(object):
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
api_region = self.module.params.get('api_region', 'cloudstack')
self.cs = CloudStack(**read_config(api_region))
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue;
# Skip None values
if value is None:
continue;
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
                # Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
            return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_tags:
return existing_tags['tag']
return []
def _delete_tags(self, resource, resource_type, tags):
        existing_tags = resource['tags']
        # 'tags' is the wanted list of {'key': ..., 'value': ...} dicts from the
        # module params; index it by key so changed or removed tags are detected.
        wanted_tags = dict((tag['key'], tag['value']) for tag in tags)
        tags_to_delete = []
        for existing_tag in existing_tags:
            if existing_tag['key'] in wanted_tags:
                if existing_tag['value'] != wanted_tags[existing_tag['key']]:
                    tags_to_delete.append(existing_tag)
            else:
                tags_to_delete.append(existing_tag)
if tags_to_delete:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_delete
self.cs.deleteTags(**args)
def _create_tags(self, resource, resource_type, tags):
tags_to_create = []
for i, tag_entry in enumerate(tags):
tag = {
'key': tag_entry['key'],
'value': tag_entry['value'],
}
tags_to_create.append(tag)
if tags_to_create:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_create
self.cs.createTags(**args)
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._delete_tags(resource, resource_type, tags)
self._create_tags(resource, resource_type, tags)
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.iteritems():
if search_key in resource:
self.result[return_key] = resource[search_key]
# Special handling for tags
if 'tags' in resource:
self.result['tags'] = []
for tag in resource['tags']:
result_tag = {}
result_tag['key'] = tag['key']
result_tag['value'] = tag['value']
self.result['tags'].append(result_tag)
return self.result
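# Hedged sketch (illustrative, not an actual Ansible module) of how a module
# would subclass AnsibleCloudStack: extra API fields are mapped into the result
# via self.returns and the generic account/domain/project helpers are reused.
# listSSHKeyPairs is a standard CloudStack API call dispatched by the cs client.
class AnsibleCloudStackSshKeyExample(AnsibleCloudStack):
    def __init__(self, module):
        super(AnsibleCloudStackSshKeyExample, self).__init__(module)
        # merged with self.common_returns by get_result()
        self.returns = {
            'fingerprint': 'fingerprint',
        }
    def get_ssh_key(self, name):
        args = {
            'name': name,
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
        }
        res = self.cs.listSSHKeyPairs(**args)
        if res:
            return res['sshkeypair'][0]
        return None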
|
gpl-3.0
|
iansf/engine
|
third_party/jinja2/tests.py
|
638
|
3444
|
# -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return text_type(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, string_types)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, mapping_types)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'mapping': test_mapping,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped
}
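# Short usage sketch (illustrative, not part of jinja2): the TESTS table backs
# the "is" operator, and an Environment accepts custom tests the same way; the
# 'multiple_of' name below is made up for the example.
def _example_custom_test():
    from jinja2 import Environment
    env = Environment()
    env.tests['multiple_of'] = lambda value, num: value % num == 0
    out = env.from_string("{{ 12 is multiple_of 4 }} {{ 7 is odd }}").render()
    assert out == "True True"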
|
bsd-3-clause
|
zhaogaolong/oneFinger
|
openstack/api/opentack_ansible.py
|
1
|
1743
|
#!/usr/bin/env python
# coding:utf8
# import pdb
import ansible.runner
from one_finger.cloud_logging import cloud_logging as logging
log = logging.logger
class CmmAndRun():
def __init__(self, module_name='shell', host=None, cmd=None, timeout=20):
self.username = 'root'
self.module_name = module_name
self.host = host
self.cmd = cmd
self.timeout = timeout
self.update_ansible_hosts()
def update_ansible_hosts(self):
status = False
b = open('/etc/ansible/hosts')
for line in b.readlines():
if self.host in line:
status = True
b.close()
if not status:
b = open('/etc/ansible/hosts','a')
b.writelines(self.host)
b.writelines('\n')
b.close()
def start(self):
runner = ansible.runner.Runner(
module_name=self.module_name,
module_args=self.cmd,
pattern=self.host,
timeout=self.timeout,
)
log.debug('ansible %s RunCommand: %s' % (self.host, self.cmd))
# import pdb
# pdb.set_trace()
datastructure = runner.run()
# print datastructure
log.debug('ansible sttout %s' % datastructure)
# print datastructure
if datastructure['dark']:
pass
else:
if not datastructure['contacted'][self.host]['rc']:
data = datastructure['contacted'][self.host]['stdout']
return data
else:
return None
if __name__ == '__main__':
ac = CmmAndRun(host='172.16.254.1', cmd='date')
print ac.start()
|
apache-2.0
|
CanalTP/navitia
|
source/jormungandr/jormungandr/interfaces/v1/JSONSchema.py
|
3
|
5011
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
import re
from flask_restful import Resource
from serpy.fields import MethodField
from flask import request
from jormungandr import app, _version
import serpy
from jormungandr.interfaces.v1.serializer.base import LiteralField, LambdaField
from jormungandr.interfaces.v1.serializer.jsonschema.serializer import SwaggerPathSerializer
from jormungandr.interfaces.v1.swagger_schema import make_schema, Swagger, ARGS_REGEXP
BASE_PATH = 'v1'
def set_definitions_in_rule(self, rule):
return re.sub(r'<(?P<name>.*?):.*?>', self.definition_repl, rule)
def format_args(rule):
"""format argument like swagger : {arg1}&{arg2}"""
formatted_rule = ARGS_REGEXP.sub(lambda m: '{' + m.group('name') + '}', rule)
return formatted_rule
base_path_regexp = re.compile('^/{base}'.format(base=BASE_PATH))
def get_all_described_paths():
"""
fetch the description of all api routes that have an 'OPTIONS' endpoint
"""
swagger = Swagger()
for endpoint, rules in app.url_map._rules_by_endpoint.items():
for rule in rules:
if 'OPTIONS' not in rule.methods or rule.provide_automatic_options:
continue
if rule.hide:
# we might want to hide some rule
continue
view_function = app.view_functions.get(endpoint)
if view_function is not None:
view_class = view_function.view_class
resource = view_class()
schema_path = make_schema(resource=resource, rule=rule)
# the definitions are stored aside and referenced in the response
swagger.definitions.update(schema_path.definitions)
formated_rule = format_args(rule.rule)
# we trim the base path
formated_rule = base_path_regexp.sub('', formated_rule)
swagger.paths[formated_rule] = schema_path
return swagger
class JsonSchemaInfo(serpy.Serializer):
title = LiteralField('navitia')
version = LiteralField(_version.__version__)
description = LiteralField(
"""
navitia.io is the open API for building cool stuff with mobility data. It provides the following services
* journeys computation
* line schedules
* next departures
* exploration of public transport data / search places
* and sexy things such as isochrones
    navitia is a HATEOAS API that returns JSON formatted results
"""
)
contact = LiteralField(
{'name': 'Navitia', 'url': 'https://www.navitia.io/', 'email': 'navitia@googlegroups.com'}
)
license = LiteralField({'name': 'license', 'url': 'https://www.navitia.io/api-term-of-use'})
class SecurityDefinitionsSerializer(serpy.Serializer):
basicAuth = LiteralField({'type': 'basic'})
class JsonSchemaEndpointsSerializer(serpy.Serializer):
basePath = LiteralField('/' + BASE_PATH)
swagger = LiteralField('2.0')
host = LambdaField(lambda *args: request.url_root.replace('http://', '').replace('https://', '').rstrip('/'))
paths = MethodField()
definitions = serpy.Field()
info = LambdaField(lambda s, o: JsonSchemaInfo(o).data)
securityDefinitions = LambdaField(lambda s, o: SecurityDefinitionsSerializer(o).data)
security = LiteralField([{'basicAuth': []}])
def get_paths(self, obj):
return {k: SwaggerPathSerializer(v).data for k, v in obj.paths.items()}
class Schema(Resource):
def __init__(self, **kwargs):
Resource.__init__(self, **kwargs)
def get(self):
"""
endpoint to get the swagger schema of Navitia
"""
path = get_all_described_paths()
return JsonSchemaEndpointsSerializer(path).data, 200
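# Hedged wiring sketch (assumed, not taken from the Navitia codebase): exposing
# the Schema resource on a flask_restful Api so that GET /v1/schema returns the
# generated swagger document; the route and endpoint names are illustrative.
def _example_register_schema(api):
    api.add_resource(Schema, '/' + BASE_PATH + '/schema', endpoint='schema')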
|
agpl-3.0
|
moiseshiraldo/inviMarket
|
inviMarket/views/register.py
|
1
|
2814
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.utils import timezone
from django.utils.translation import ugettext as _
import hashlib
import datetime
import random
from inviMarket.models import Profile
from inviMarket.forms import RegisterForm
def register(request):
"""
Display the user registration form and store the :model:`auth.User` and
his :model:`inviMarket.Profile` in the database.
**Context**
``form``
        An instance of the user registration form.
``error``
A string variable containing any general error message.
**Template:**
:template:`inviMarket/register.html`
"""
error = None
if request.user.is_authenticated():
return redirect('index')
if request.method == 'POST':
form = RegisterForm(request.POST)
if 'terms' not in request.POST:
error= _("You must read and accept the terms and conditions.")
elif form.is_valid():
if form.cleaned_data['last_name'] != "":
return redirect('confirm')
new_user = form.save()
# Create a random activation key and store it in the user profile
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt+new_user.email).hexdigest()
key_expires = timezone.now() + datetime.timedelta(2)
lang = request.LANGUAGE_CODE
profile = Profile(user=new_user, activation_key=activation_key,
key_expires=key_expires, lang=lang, last_visit=timezone.now())
profile.save()
# Send the activation key to the user
text = render_to_string('email/activation.txt',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
html = render_to_string('email/activation.html',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
subject = "Account activation"
send_mail(subject, text, "inviMarket <no-reply@inviMarket.com>",
[new_user.email], html_message=html,fail_silently=False)
return redirect('confirm')
else:
form = RegisterForm()
return render(request, 'register.html', {'form': form, 'error': error})
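# Hedged wiring sketch (assumed, not taken from the project urls.py): the URL
# names this view relies on via redirect() -- 'confirm' and 'index' -- would be
# declared roughly like this; patterns and template names are illustrative.
def _example_urlpatterns():
    from django.conf.urls import url
    from django.views.generic import TemplateView
    return [
        url(r'^register/$', register, name='register'),
        url(r'^confirm/$', TemplateView.as_view(template_name='confirm.html'),
            name='confirm'),
    ]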
|
agpl-3.0
|
mojeto/django
|
django/contrib/gis/db/models/aggregates.py
|
9
|
2194
|
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
function = None
is_extent = False
def as_sql(self, compiler, connection):
# this will be called again in parent, but it's needed now - before
# we get the spatial_aggregate_name
connection.ops.check_expression_support(self)
self.function = connection.ops.spatial_aggregate_name(self.name)
return super().as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if not hasattr(self, 'tolerance'):
self.tolerance = 0.05
self.extra['tolerance'] = self.tolerance
if not self.is_extent:
self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
return self.as_sql(compiler, connection)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
for expr in c.get_source_expressions():
if not hasattr(expr.field, 'geom_type'):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
return c
class Collect(GeoAggregate):
name = 'Collect'
class Extent(GeoAggregate):
name = 'Extent'
is_extent = '2D'
def __init__(self, expression, **extra):
super().__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent(value, context.get('transformed_srid'))
class Extent3D(GeoAggregate):
name = 'Extent3D'
is_extent = '3D'
def __init__(self, expression, **extra):
super().__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent3d(value, context.get('transformed_srid'))
class MakeLine(GeoAggregate):
name = 'MakeLine'
class Union(GeoAggregate):
name = 'Union'
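# Usage sketch (illustrative, not part of django.contrib.gis): aggregate a
# queryset of a hypothetical model with a `point` geometry field using the
# classes defined above.
def example_geo_aggregates(city_queryset):
    data = city_queryset.aggregate(bbox=Extent('point'), merged=Union('point'))
    # bbox is a 4-tuple (xmin, ymin, xmax, ymax); merged is a single geometry
    return data['bbox'], data['merged']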
|
bsd-3-clause
|
michelts/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/get_or_create_regress/tests.py
|
88
|
2540
|
from django.test import TestCase
from models import Author, Publisher
class GetOrCreateTests(TestCase):
def test_related(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
|
gpl-3.0
|
Anonymous-X6/django
|
django/views/generic/detail.py
|
306
|
6922
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class SingleObjectMixin(ContextMixin):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
query_pk_and_slug = False
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg)
slug = self.kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError("Generic detail view %s must be called with "
"either an object pk or a slug."
% self.__class__.__name__)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset.all()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
if self.object._deferred:
obj = obj._meta.proxy_for_model
return obj._meta.model_name
else:
return None
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = {}
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
"""
A base view for displaying a single object
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. May not be
called if render_to_response is overridden. Returns the following list:
* the value of ``template_name`` on the view (if provided)
* the contents of the ``template_name_field`` field on the
object instance that the view is operating upon (if available)
* ``<app_label>/<model_name><template_name_suffix>.html``
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if isinstance(self.object, models.Model):
object_meta = self.object._meta
if self.object._deferred:
object_meta = self.object._meta.proxy_for_model._meta
names.append("%s/%s%s.html" % (
object_meta.app_label,
object_meta.model_name,
self.template_name_suffix
))
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.model_name,
self.template_name_suffix
))
# If we still haven't managed to find any template names, we should
# re-raise the ImproperlyConfigured to alert the user.
if not names:
raise
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
bsd-3-clause
|
zenlambda/pip
|
pip/_vendor/cachecontrol/adapter.py
|
469
|
4196
|
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache if that is the case.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
# We always cache the 301 responses
elif response.status == 301:
self.controller.cache_response(request, response)
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
# response when the stream has been consumed.
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
# See if we should invalidate the cache.
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
# Give the request a from_cache attr to let people use it
resp.from_cache = from_cache
return resp
def close(self):
self.cache.close()
super(CacheControlAdapter, self).close()
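if __name__ == "__main__":
    # Illustrative usage sketch (an assumption, not part of the original
    # module): the adapter is meant to be mounted on a requests Session so
    # that GET responses are cached and replayed transparently.
    from pip._vendor import requests
    sess = requests.Session()
    sess.mount("http://", CacheControlAdapter())
    sess.mount("https://", CacheControlAdapter())
    resp = sess.get("http://example.com/")
    # from_cache is set by build_response(); False on a first, uncached fetch.
    print(getattr(resp, "from_cache", False))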
|
mit
|
jamal-ahmad/Logging-Kernel
|
tools/perf/scripts/python/event_analyzing_sample.py
|
4719
|
7393
|
# event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If perf.data contains a large number of samples, the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Moving the .db file to a RAM-based FS speeds up
# the handling, cutting the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
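#
# Note (illustrative): an alternative to a RAM-backed file is SQLite's
# built-in in-memory database, e.g. sqlite3.connect(":memory:"), at the
# cost of losing the .db file for later ad-hoc queries once the script exits.
#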
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# Since the event count may be very large, we can't show the histogram
# on a linear scale, so we use a log2 algorithm instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
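# Worked example (illustrative): num2sym(1) -> '#', num2sym(8) -> '####',
# num2sym(1000) -> '##########', i.e. each extra '#' roughly doubles the count.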
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
|
gpl-2.0
|
scealiontach/cryptotrading
|
src/autotrader/hashnest.py
|
1
|
4020
|
import urllib,urllib.parse,urllib.request,urllib.error
import json
import time,datetime
import hashlib,hmac,base64
import logging
LOG=logging.getLogger(__name__)
#SELL='sale'
#BUY='purchase'
class hashnest(object):
URL = 'https://www.hashnest.com/api/v1/'
def __init__(self,username,key,secret):
self.username=username
self.key=key
self.secret=secret
def get_nonce(self):
self.utcnow=a=datetime.datetime.utcnow()
b=datetime.datetime(1970,1,1,0,0,0,0)
self.nonce= int((a-b).total_seconds()*1000)
return self.nonce
def signature(self,req):
nonce=self.get_nonce()
message = str(nonce) + self.username + self.key
req['access_key']=self.key
req['nonce']=nonce
req['signature']= hmac.new(self.secret.encode(), msg=message.encode(), digestmod=hashlib.sha256).hexdigest()
return urllib.parse.urlencode(req)
def request(self,url,req={}):
url = self.URL + url
data= self.signature(req)
url=url+'?'+data
req = urllib.request.Request(url, method='POST')
retry=True
retry_count=0
while retry:
try:
with urllib.request.urlopen(req) as resp:
r=resp.read()
retObj=json.loads(r.decode())
time.sleep(1)
retry=False
except urllib.error.HTTPError as e:
if e.code!=401:
raise e
else:
if retry_count<10:
time.sleep(retry_count**2)
retry_count+=1
else:
raise e
return retObj
def get_account_info(self):
return self.request('account')
def get_account_balance(self):
return self.request('currency_accounts')
def get_account_hashrate(self):
return self.request('hash_accounts')
def get_account_orders(self,cmi):
param={'currency_market_id':cmi}
return self.request('orders/active',param)
def get_account_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
return self.request('orders/history',param)
def create_order(self,cmi,amount,ppc,category):
param={'currency_market_id':cmi}
param['amount']=amount
param['ppc']=ppc
param['category']=category
return self.request('orders',param)
def cancel_order(self,order_id):
param={'order_id':order_id}
return self.request('orders/revoke',param)
def cancel_all_orders(self,cmi,category):
param={'currency_market_id':cmi}
param['category']=category
return self.request('orders/quick_revoke',param)
def get_opened_markets(self):
return self.request('currency_markets')
def get_book(self,cmi):
param={'currency_market_id':cmi}
return self.request('currency_markets/orders',param)
def get_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
param['page_size']=page_amount
return self.request('currency_markets/order_history',param)
def pretty_print_POST(req):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
However pay attention at the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
"""
return '{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.get_method() + ' ' + req.get_full_url(),
'\n'.join('{}: {}'.format(k, v) for k, v in req.header_items()),
req.data,
)
|
apache-2.0
|
tuskar/tuskar
|
tuskar/openstack/common/rpc/impl_kombu.py
|
2
|
31906
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from tuskar.openstack.common.gettextutils import _
from tuskar.openstack.common import network_utils
from tuskar.openstack.common.rpc import amqp as rpc_amqp
from tuskar.openstack.common.rpc import common as rpc_common
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues in RabbitMQ'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all).'
'You need to wipe RabbitMQ database when '
'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect"""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
If kwargs['nowait'] is True, then this call will block until
a message is read.
Messages will automatically be acked if the callback doesn't
raise an exception
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started"""
try:
self.queue.cancel(self.tag)
except KeyError as e:
# NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.rabbit_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class"""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection"""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg, timeout=None):
"""Send a message"""
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.rabbit_durable_queues,
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'"""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params
should be used for the connection (if any)"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = self.conf.kombu_ssl_version
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
if not ssl_params:
# Just have the default behavior
return True
else:
# Return the extended behavior
return ssl_params
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
LOG.error(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
# NOTE(comstud): Copied from original code. There's
# really no better recourse because if this was a queue we
# need to consume on, we have no way to consume anymore.
sys.exit(1)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback:
error_callback(e)
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues"""
return self.channel
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1]
queues_tail = self.consumers[-1]
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class"""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
)
def create_connection(conf, new=True):
"""Create a connection"""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
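# Illustrative usage sketch (an assumption, not part of the original module):
# the module-level helpers above are the usual entry points, e.g.
#
#     cast(cfg.CONF, context, 'compute', {'method': 'do_work', 'args': {}})
#
# where `context` is an RPC context object and the topic and message shown
# here are placeholders.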
|
apache-2.0
|
jreut/Switcharoo
|
scraper/scraper/events.py
|
2
|
1340
|
# Copyright 2015 Adam Greenstein <adamgreenstein@comcast.net>
#
# Switcharoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Switcharoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Switcharoo. If not, see <http://www.gnu.org/licenses/>.
class EventsBase(object):
def on_creating_node(self):
print 'Creating new node in DB.'
def on_node_exists(self):
print 'Node exists'
def on_adding_to_queue(self, url):
url = url.encode('utf-8')
print 'Adding ' + url + ' to queue.'
def on_not_adding_to_queue(self, url):
url = url.encode('utf-8')
print 'Skipping ' + url + ' not adding to queue.'
def waiting(self, seconds):
print 'Waiting ' + str(seconds) + ' seconds for new submissions...'
def on_clearing_cache(self):
print "Clearing Cache"
def using_url(self, url):
print 'Using ' + url
|
gpl-3.0
|
mdeemer/XlsxWriter
|
xlsxwriter/test/styles/test_styles01.py
|
8
|
2940
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...styles import Styles
from ...workbook import Workbook
class TestAssembleStyles(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for styles.xml file with default styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook._prepare_format_properties()
style._set_style_properties([
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_format_count,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
])
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
bsd-2-clause
|
sony/nnabla
|
python/test/utils/learning_rate_scheduler/test_cosine_scheduler.py
|
1
|
1249
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from learning_rate_scheduler_test_utils import scheduler_tester
import nnabla.utils.learning_rate_scheduler as lrs
import math
class RefCosine(object):
def __init__(self, init_lr, max_iter):
self.init_lr = init_lr
self.max_iter = max_iter
def get_learning_rate(self, iter):
return self.init_lr * ((math.cos(iter * 1.0 / self.max_iter * math.pi) + 1.0) * 0.5)
@pytest.mark.parametrize("init_lr", [0.1, 0.01])
@pytest.mark.parametrize("max_iter", [1000, 10000])
def test_cosine_scheduler(init_lr, max_iter):
scheduler_tester(
lrs.CosineScheduler, RefCosine, max_iter, [init_lr, max_iter])
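if __name__ == "__main__":
    # Worked example (illustrative, not part of the original test): with
    # init_lr=0.1 and max_iter=1000 the reference schedule starts at 0.1,
    # passes through 0.05 at iter=500 and decays to 0.0 at iter=1000.
    ref = RefCosine(0.1, 1000)
    for it in (0, 500, 1000):
        print(it, ref.get_learning_rate(it))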
|
apache-2.0
|
mattstruble/crusty
|
crusty/graphics/colorama/winterm.py
|
523
|
4206
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
def get_attrs(self):
return self._fore + self._back * 16 + self._style
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & WinStyle.BRIGHT
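    # Worked example (illustrative): an attribute value of
    # WinColor.RED + WinColor.BLUE * 16 + WinStyle.BRIGHT == 4 + 16 + 8 == 28
    # round-trips through set_attrs()/get_attrs() as fore=RED, back=BLUE, BRIGHT.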
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
def fore(self, fore=None, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
self.set_console(on_stderr=on_stderr)
def back(self, back=None, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
#I'm not currently tracking the position, so there is no default.
#position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_up(self, num_rows=0, on_stderr=False):
if num_rows == 0:
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y - num_rows, position.X)
self.set_cursor_position(adjusted_position, on_stderr)
def erase_data(self, mode=0, on_stderr=False):
# 0 (or None) should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen. (And maybe move cursor to (1,1)?)
#
# At the moment, I only support mode 2. From looking at the API, it
# should be possible to calculate a different number of bytes to clear,
# and to do so relative to the cursor position.
if mode[0] not in (2,):
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
# here's where we'll home the cursor
coord_screen = win32.COORD(0,0)
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
dw_con_size = csbi.dwSize.X * csbi.dwSize.Y
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', dw_con_size, coord_screen)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), dw_con_size, coord_screen );
# put the cursor at (0, 0)
win32.SetConsoleCursorPosition(handle, (coord_screen.X, coord_screen.Y))
|
mit
|
foxmask/django-th
|
django_th/html_entities.py
|
1
|
1275
|
# coding: utf-8
import html.entities as htmlentities
import re
class HtmlEntities:
def __init__(self, my_string):
self.my_string = my_string
def html_entity_decode_char(self, m, defs=htmlentities.entitydefs):
"""
decode html entity into one of the html char
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
def html_entity_decode_codepoint(self, m,
defs=htmlentities.codepoint2name):
"""
decode html entity into one of the codepoint2name
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
@property
def html_entity_decode(self):
"""
entry point of this set of tools
to decode html entities
"""
pattern = re.compile(r"&#(\w+?);")
string = pattern.sub(self.html_entity_decode_char, self.my_string)
return pattern.sub(self.html_entity_decode_codepoint, string)
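if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): run the
    # decoder over a sample string containing numeric entities; the sample
    # text is an assumption for demonstration only.
    sample = HtmlEntities("caf&#233; &#38; more")
    print(sample.html_entity_decode)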
|
bsd-3-clause
|
llhe/tensorflow
|
tensorflow/contrib/slim/python/slim/learning_test.py
|
17
|
37002
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from numpy import testing as np_testing
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
class ClipGradientNormsTest(test.TestCase):
def clip_values(self, arr):
norm = np.sqrt(np.sum(arr**2))
if norm > self._max_norm:
return self._max_norm * arr / np.sqrt(np.sum(arr**2))
return arr
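  # Worked example (illustrative): for the grad_vec [1., 2., 3.] used below,
  # the L2 norm is sqrt(14) ~= 3.742 > max_norm 1.0, so the clipped vector is
  # roughly [0.267, 0.535, 0.802], which the tests compare against.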
def setUp(self):
np.random.seed(0)
self._max_norm = 1.0
self._grad_vec = np.array([1., 2., 3.])
self._clipped_grad_vec = self.clip_values(self._grad_vec)
self._zero_vec = np.zeros(self._grad_vec.size)
def testOrdinaryGradIsClippedCorrectly(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
# Ensure the variable passed through.
self.assertEqual(gradients_to_variables[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(gradients_to_variables[0])
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
def testNoneGradPassesThroughCorrectly(self):
gradient = None
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
self.assertEqual(gradients_to_variables[0], None)
self.assertEqual(gradients_to_variables[1], variable)
def testIndexedSlicesGradIsClippedCorrectly(self):
sparse_grad_indices = np.array([0, 1, 4])
sparse_grad_dense_shape = [self._grad_vec.size]
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant(sparse_grad_indices, dtype=dtypes.int32)
dense_shape = constant_op.constant(
sparse_grad_dense_shape, dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
gradients_to_variables = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)[0]
# Ensure the built IndexedSlice has the right form.
self.assertEqual(gradients_to_variables[1], variable)
self.assertEqual(gradients_to_variables[0].indices, indices)
self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)
with session.Session() as sess:
actual_gradient = sess.run(gradients_to_variables[0].values)
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
class MultiplyGradientsTest(test.TestCase):
def setUp(self):
np.random.seed(0)
self._multiplier = 3.7
self._grad_vec = np.array([1., 2., 3.])
self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier)
def testNonListGradsRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testEmptyMultiplesRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], {})
def testNonDictMultiplierRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], 3)
def testMultipleOfNoneGradRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (None, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testMultipleGradientsWithVariables(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
# Ensure the variable passed through.
self.assertEqual(grad_to_var[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def testIndexedSlicesGradIsMultiplied(self):
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
dense_shape = constant_op.constant(
[self._grad_vec.size], dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(array_ops.zeros((1, 3)))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
# Ensure the built IndexedSlice has the right form.
self.assertEqual(grad_to_var[1], variable)
self.assertEqual(grad_to_var[0].indices, indices)
self.assertEqual(grad_to_var[0].dense_shape, dense_shape)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0].values)
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def testTensorMultiplierOfGradient(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
multiplier_flag = variables_lib.Variable(True)
tensor_multiplier = array_ops.where(multiplier_flag,
self._multiplier,
1.0)
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: tensor_multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
gradient_true_flag = sess.run(grad_to_var[0])
sess.run(multiplier_flag.assign(False))
gradient_false_flag = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(gradient_true_flag,
self._multiplied_grad_vec,
5)
np_testing.assert_almost_equal(gradient_false_flag,
self._grad_vec,
5)
def LogisticClassifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def BatchNormClassifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class TrainBNClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
g = ops.Graph()
with g.as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertLess(loss, .1)
class CreateTrainOpTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
0]
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer, update_ops=[])
moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
0]
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testUseGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
global_step = variables_lib2.get_or_create_global_step()
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
# After 10 updates global_step should be 10.
self.assertAllClose(global_step, 10)
def testNoneGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(
total_loss, optimizer, global_step=None)
global_step = variables_lib2.get_or_create_global_step()
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step, 0)
def testRecordTrainOpInCollection(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNonDefaultGraph(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
g = ops.Graph()
with g.as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithNoneAsLogdir(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, None, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithSessionConfig(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
loss = learning.train(
train_op,
None,
number_of_steps=300,
log_every_n_steps=10,
session_config=session_config)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithTrace(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
summary.scalar('total_loss', total_loss)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op,
logdir,
number_of_steps=300,
log_every_n_steps=10,
trace_every_n_steps=100)
self.assertIsNotNone(loss)
for trace_step in [1, 101, 201]:
trace_filename = 'tf_trace-%d.json' % trace_step
self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
summary.scalar('total_loss', total_loss)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
summary_op = summary.merge_all()
with self.assertRaises(ValueError):
learning.train(
train_op, None, number_of_steps=300, summary_op=summary_op)
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
with self.assertRaises(ValueError):
learning.train(
train_op, None, number_of_steps=300, trace_every_n_steps=10)
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
with self.assertRaises(ValueError):
learning.train(
train_op, None, init_op=None, number_of_steps=300, saver=saver)
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
with self.assertRaises(RuntimeError):
learning.train(train_op, logdir, init_op=None, number_of_steps=300)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib2.local_variable(1.0)
tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
number_of_steps = [300, 301, 305]
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op,
logdir,
number_of_steps=number_of_steps[i],
log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
if gradient_multiplier != 1.0:
variables = variables_lib.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
else:
gradient_multipliers = None
return learning.create_train_op(
total_loss, optimizer, gradient_multipliers=gradient_multipliers)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
loss = learning.train(train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib.global_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
init_op = variables_lib.global_variables_initializer()
op, init_feed_dict = variables_lib2.assign_from_checkpoint(
model_path, model_variables)
def InitAssignFn(sess):
sess.run(op, init_feed_dict)
loss = learning.train(
train_op,
logdir2,
number_of_steps=1,
init_op=init_op,
init_fn=InitAssignFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def testTrainWithInitFromFn(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
loss = learning.train(train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib.global_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
saver = saver_lib.Saver(model_variables)
def RestoreFn(sess):
saver.restore(sess, model_path)
loss = learning.train(
train_op, logdir2, number_of_steps=1, init_fn=RestoreFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
return loss_ops.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib2.get_variables_by_name('weights')
train_op = learning.create_train_op(
total_loss, optimizer, variables_to_train=weights)
loss = learning.train(
train_op, logdir1, number_of_steps=200, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib2.get_variables_by_name('biases')
train_op = learning.create_train_op(
total_loss, optimizer, variables_to_train=biases)
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir1, number_of_steps=400, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib2.get_variables()
train_op = learning.create_train_op(total_loss, optimizer)
train_weights = learning.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = learning.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with session.Session() as sess:
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = sess.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = sess.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = sess.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
multipliers = [1., 1000.]
number_of_steps = 10
losses = []
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss = learning.train(train_op, logdir1, number_of_steps=number_of_steps)
losses.append(loss)
self.assertGreater(loss, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss = learning.train(train_op, logdir2, number_of_steps=number_of_steps)
losses.append(loss)
self.assertIsNotNone(loss)
self.assertLess(loss, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(losses[0], losses[1])
def testTrainWithEpochLimit(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_inputs_limited = input_lib.limit_epochs(tf_inputs, num_epochs=300)
tf_labels_limited = input_lib.limit_epochs(tf_labels, num_epochs=300)
tf_predictions = LogisticClassifier(tf_inputs_limited)
loss_ops.log_loss(tf_predictions, tf_labels_limited)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(train_op, logdir, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
self.assertTrue(os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
self.assertTrue(os.path.isfile('{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
elkingtonmcb/nupic
|
src/encoders/adaptivescalar.py
|
32
|
8059
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy as np
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import EncoderResult
from nupic.encoders.scalar import ScalarEncoder
from nupic.utils import MovingAverage
class AdaptiveScalarEncoder(ScalarEncoder):
"""
This is an implementation of the scalar encoder that adapts the min and
max of the scalar encoder dynamically. This is essential to the streaming
model of the online prediction framework.
  Initialization of an adaptive encoder using resolution or radius is not supported;
  it must be initialized with n. This n is kept constant while the min and max of the
  encoder change.
  The adaptive encoder must have periodic set to False.
  The adaptive encoder may be initialized with a minval and maxval or with `None`
  for each of these. In the latter case, the min and max are learned from the data:
  they track the minimum and maximum observed over a sliding window of recent
  records (300 in this implementation).
**Note:** the sliding window may record duplicates of the values in the dataset,
and therefore does not reflect the statistical distribution of the input data
and may not be used to calculate the median, mean etc.
"""
def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.__init__]
"""
self._learningEnabled = True
if periodic:
#Adaptive scalar encoders take non-periodic inputs only
raise Exception('Adaptive scalar encoder does not encode periodic inputs')
    assert n!=0 #An adaptive encoder can only be initialized using n
super(AdaptiveScalarEncoder, self).__init__(w=w, n=n, minval=minval, maxval=maxval,
clipInput=True, name=name, verbosity=verbosity, forced=forced)
self.recordNum=0 #how many inputs have been sent to the encoder?
self.slidingWindow = MovingAverage(300)
def _setEncoderParams(self):
"""
Set the radius, resolution and range. These values are updated when minval
and/or maxval change.
"""
self.rangeInternal = float(self.maxval - self.minval)
self.resolution = float(self.rangeInternal) / (self.n - self.w)
self.radius = self.w * self.resolution
self.range = self.rangeInternal + self.resolution
# nInternal represents the output area excluding the possible padding on each side
self.nInternal = self.n - 2 * self.padding
# Invalidate the bucket values cache so that they get recomputed
self._bucketValues = None
def setFieldStats(self, fieldName, fieldStats):
"""
TODO: document
"""
#If the stats are not fully formed, ignore.
if fieldStats[fieldName]['min'] == None or \
fieldStats[fieldName]['max'] == None:
return
self.minval = fieldStats[fieldName]['min']
self.maxval = fieldStats[fieldName]['max']
if self.minval == self.maxval:
self.maxval+=1
self._setEncoderParams()
def _setMinAndMax(self, input, learn):
"""
Potentially change the minval and maxval using input.
**The learn flag is currently not supported by cla regions.**
"""
self.slidingWindow.next(input)
if self.minval is None and self.maxval is None:
self.minval = input
      self.maxval = input+1 #When the min and max are unspecified and only one record has been encoded
self._setEncoderParams()
elif learn:
sorted = self.slidingWindow.getSlidingWindow()
sorted.sort()
minOverWindow = sorted[0]
maxOverWindow = sorted[len(sorted)-1]
if minOverWindow < self.minval:
#initialBump = abs(self.minval-minOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2 #decrement minval more aggressively in the beginning
if self.verbosity >= 2:
print "Input %s=%.2f smaller than minval %.2f. Adjusting minval to %.2f"\
% (self.name, input, self.minval, minOverWindow)
self.minval = minOverWindow #-initialBump
self._setEncoderParams()
if maxOverWindow > self.maxval:
        #initialBump = abs(self.maxval-maxOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2 #increment maxval more aggressively in the beginning
if self.verbosity >= 2:
print "Input %s=%.2f greater than maxval %.2f. Adjusting maxval to %.2f" \
% (self.name, input, self.maxval, maxOverWindow)
self.maxval = maxOverWindow #+initialBump
self._setEncoderParams()
def getBucketIndices(self, input, learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]
"""
self.recordNum +=1
if learn is None:
learn = self._learningEnabled
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None]
else:
self._setMinAndMax(input, learn)
return super(AdaptiveScalarEncoder, self).getBucketIndices(input)
def encodeIntoArray(self, input, output,learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray]
"""
self.recordNum +=1
if learn is None:
learn = self._learningEnabled
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:self.n] = 0
elif not math.isnan(input):
self._setMinAndMax(input, learn)
super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)
def getBucketInfo(self, buckets):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo]
"""
if self.minval is None or self.maxval is None:
      return [EncoderResult(value=0, scalar=0,
                            encoding=np.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)
def topDownCompute(self, encoded):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]
"""
if self.minval is None or self.maxval is None:
      return [EncoderResult(value=0, scalar=0,
                            encoding=np.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)
def dump(self):
"""
Prints details about current state to stdout.
"""
print "AdaptiveScalarEncoder:"
print " min: %f" % self.minval
print " max: %f" % self.maxval
print " w: %d" % self.w
print " n: %d" % self.n
print " resolution: %f" % self.resolution
print " radius: %f" % self.radius
print " periodic: %s" % self.periodic
print " nInternal: %d" % self.nInternal
print " rangeInternal: %f" % self.rangeInternal
print " padding: %d" % self.padding
@classmethod
def read(cls, proto):
encoder = super(AdaptiveScalarEncoder, cls).read(proto)
encoder.recordNum = proto.recordNum
encoder.slidingWindow = MovingAverage.read(proto.slidingWindow)
return encoder
def write(self, proto):
super(AdaptiveScalarEncoder, self).write(proto)
proto.recordNum = self.recordNum
self.slidingWindow.write(proto.slidingWindow)
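if __name__ == "__main__":
  # Editorial usage sketch, not part of the original NuPIC module: it shows how
  # the encoder widens its learned [minval, maxval] range as records arrive
  # while n stays fixed. The parameter values below are arbitrary assumptions
  # chosen only for demonstration.
  demoEncoder = AdaptiveScalarEncoder(w=21, n=100, minval=None, maxval=None,
                                      forced=True)
  for demoValue in (10.0, 12.5, 30.0, 5.0):
    demoEncoder.getBucketIndices(demoValue) # updates minval/maxval as a side effect
  print "Learned range: [%s, %s]" % (demoEncoder.minval, demoEncoder.maxval)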
|
agpl-3.0
|
linearregression/osquery
|
tools/codegen/gentargets.py
|
42
|
2749
|
#!/usr/bin/env python
import argparse
import json
import logging
import os
logging_format = '[%(levelname)s] %(message)s'
logging.basicConfig(level=logging.INFO, format=logging_format)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, "../.."))
def get_files_to_compile(json_data):
files_to_compile = []
for element in json_data:
filename = element["file"]
if not filename.endswith("_tests.cpp") and \
"third-party" not in filename and \
"example" not in filename and \
"generated/gen" not in filename:
base = filename.rfind("osquery/")
filename = filename[base + len("osquery/"):]
base_generated = filename.rfind("generated/")
if base_generated >= 0:
filename = filename[base_generated:]
files_to_compile.append(filename)
return files_to_compile
TARGETS_PREAMBLE = """
# DO NOT EDIT
# Automatically generated: make sync
thrift_library(
name="if",
languages=[
"cpp",
"py",
],
thrift_srcs={
"extensions/osquery.thrift": ["Extension", "ExtensionManager"],
},
)
cpp_library(
name="osquery_sdk",
srcs=["""
TARGETS_POSTSCRIPT = """ ],
deps=[
"@/thrift/lib/cpp/concurrency",
":if-cpp",
],
external_deps=[
"boost",
"glog",
"gflags",
"gtest",
"rocksdb",
"libuuid",
],
compiler_flags=[
"-Wno-unused-function",
"-Wno-non-virtual-dtor",
"-Wno-address",
"-Wno-overloaded-virtual",
"-DOSQUERY_BUILD_VERSION=%s",
"-DOSQUERY_BUILD_SDK_VERSION=%s",
"-DOSQUERY_THRIFT_LIB=thrift/lib/cpp",
"-DOSQUERY_THRIFT_SERVER_LIB=thrift/lib/cpp/server/example",
"-DOSQUERY_THRIFT_POINTER=std",
"-DOSQUERY_THRIFT=osquery/gen-cpp/",
],
)
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"Generate a TARGETS files from CMake metadata"
))
parser.add_argument("--input", "-i", required=True)
parser.add_argument("--version", "-v", required=True)
parser.add_argument("--sdk", required=True)
args = parser.parse_args()
try:
with open(args.input, "r") as f:
try:
json_data = json.loads(f.read())
except ValueError:
logging.critical("Error: %s is not valid JSON" % args.input)
source_files = get_files_to_compile(json_data)
print(TARGETS_PREAMBLE)
for source_file in source_files:
print(" \"%s\"," % source_file)
print(TARGETS_POSTSCRIPT % (args.version, args.sdk))
except IOError:
logging.critical("Error: %s doesn't exist" % args.input)
|
bsd-3-clause
|
liamgh/liamgreenhughes-sl4a-tf101
|
python/src/Lib/test/test_tuple.py
|
59
|
3024
|
from test import test_support, seq_tests
class TupleTest(seq_tests.CommonTest):
type2test = tuple
def test_constructors(self):
super(TupleTest, self).test_len()
# calling built-in types without argument must return empty
self.assertEqual(tuple(), ())
t0_3 = (0, 1, 2, 3)
t0_3_bis = tuple(t0_3)
self.assert_(t0_3 is t0_3_bis)
self.assertEqual(tuple([]), ())
self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3))
self.assertEqual(tuple(''), ())
self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm'))
def test_truth(self):
super(TupleTest, self).test_truth()
self.assert_(not ())
self.assert_((42, ))
def test_len(self):
super(TupleTest, self).test_len()
self.assertEqual(len(()), 0)
self.assertEqual(len((0,)), 1)
self.assertEqual(len((0, 1, 2)), 3)
def test_iadd(self):
super(TupleTest, self).test_iadd()
u = (0, 1)
u2 = u
u += (2, 3)
self.assert_(u is not u2)
def test_imul(self):
super(TupleTest, self).test_imul()
u = (0, 1)
u2 = u
u *= 3
self.assert_(u is not u2)
def test_tupleresizebug(self):
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
self.assertEqual(list(tuple(f())), range(1000))
def test_hash(self):
# See SF bug 942952: Weakness in tuple hash
# The hash should:
# be non-commutative
# should spread-out closely spaced values
# should not exhibit cancellation in tuples like (x,(x,y))
# should be distinct from element hashes: hash(x)!=hash((x,))
# This test exercises those cases.
# For a pure random hash and N=50, the expected number of occupied
# buckets when tossing 252,600 balls into 2**32 buckets
# is 252,592.6, or about 7.4 expected collisions. The
# standard deviation is 2.73. On a box with 64-bit hash
# codes, no collisions are expected. Here we accept no
# more than 15 collisions. Any worse and the hash function
# is sorely suspect.
N=50
base = range(N)
xp = [(i, j) for i in base for j in base]
inps = base + [(i, j) for i in base for j in xp] + \
[(i, j) for i in xp for j in base] + xp + zip(base)
collisions = len(inps) - len(set(map(hash, inps)))
self.assert_(collisions <= 15)
def test_repr(self):
l0 = tuple()
l2 = (0, 1, 2)
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), repr(l0))
self.assertEqual(str(a2), repr(l2))
self.assertEqual(repr(a0), "()")
self.assertEqual(repr(a2), "(0, 1, 2)")
def test_main():
test_support.run_unittest(TupleTest)
if __name__=="__main__":
test_main()
|
apache-2.0
|
cihai/cihai-python
|
tests/test_conversion.py
|
1
|
3543
|
# -*- coding: utf-8 -*-
"""Tests for cihai.
test.conversion
~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
from cihai import conversion
from cihai._compat import string_types, text_type
def test_text_type():
c1 = '(same as U+7A69 穩) firm; stable; secure'
c2 = text_type()
assert isinstance(c1, string_types)
assert isinstance(c2, text_type)
"""Return UCN character from Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding Unicode UCN ('U+4E00').
U+369D kSemanticVariant U+595E<kMatthews U+594E<kMatthews
U+3CE2 kTraditionalVariant U+23FB7
U+3FF7 kSemanticVariant U+7CD9<kMatthews,kMeyerWempe
U+345A kDefinition (non-classical form of 那) that, there
U+349A kDefinition (same as U+7A69 穩) firm; stable; secure,
dependent upon others
U+34B5 kMandarin mào
U+356D kCantonese au3 jaau1
"""
def test_ucn_from_unicode():
text = '一'
python_unicode = u'\u4e00'
expected = "U+4E00"
bytes_expected = b"U+4E00"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_from_unicode_16():
text = '𦄀'
    python_unicode = u'\U00026100'
expected = "U+26100"
bytes_expected = b"U+26100"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_to_unicode():
before = 'U+4E00'
expected = '\u4e00'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
# wide character
before = 'U+20001'
expected = '\U00020001'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
before = '(same as U+7A69 穩) firm; stable; secure'
expected = '(same as 穩 穩) firm; stable; secure'
result = conversion.ucnstring_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
"""Return EUC character from a Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding EUC hex ('d2bb').
"""
def test_hexd():
assert conversion.hexd(0xFFFF) == 'ffff'
def test_euc_from_unicode():
expected = '一' # u'\u4e00'
euc_bytestring = b'd2bb'
euc_unicode = 'd2bb'
result = conversion.python_to_euc(expected, as_bytes=True)
assert euc_bytestring == result
assert isinstance(result, bytes)
result = conversion.python_to_euc(expected)
assert euc_unicode == result
assert isinstance(result, text_type)
def test_euc_to_utf8():
expected = '一'
euc_bytestring = b'b0ec'
result = conversion.euc_to_utf8(euc_bytestring)
assert expected == result
def test_euc_to_unicode():
expected = '一'
expected_ustring = u'\u4e00'
euc_bytestring = b'd2bb'
result = conversion.euc_to_unicode(euc_bytestring)
assert expected == expected_ustring
assert isinstance(result, text_type)
assert expected == result
assert expected_ustring == result
|
bsd-3-clause
|
sajeeshcs/nested_projects_keystone
|
keystone/credential/controllers.py
|
1
|
4503
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone.credential import schema
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
@dependency.requires('credential_api')
class CredentialV3(controller.V3Controller):
collection_name = 'credentials'
member_name = 'credential'
def __init__(self):
super(CredentialV3, self).__init__()
self.get_member_from_driver = self.credential_api.get_credential
def _assign_unique_id(self, ref, trust_id=None):
# Generates and assigns a unique identifier to
# a credential reference.
if ref.get('type', '').lower() == 'ec2':
try:
blob = jsonutils.loads(ref.get('blob'))
except (ValueError, TypeError):
raise exception.ValidationError(
message=_('Invalid blob in credential'))
if not blob or not isinstance(blob, dict):
raise exception.ValidationError(attribute='blob',
target='credential')
if blob.get('access') is None:
raise exception.ValidationError(attribute='access',
target='blob')
ret_ref = ref.copy()
ret_ref['id'] = hashlib.sha256(blob['access']).hexdigest()
# Update the blob with the trust_id, so credentials created
# with a trust scoped token will result in trust scoped
# tokens when authentication via ec2tokens happens
if trust_id is not None:
blob['trust_id'] = trust_id
ret_ref['blob'] = jsonutils.dumps(blob)
return ret_ref
else:
return super(CredentialV3, self)._assign_unique_id(ref)
@controller.protected()
@validation.validated(schema.credential_create, 'credential')
def create_credential(self, context, credential):
trust_id = self._get_trust_id_for_request(context)
ref = self._assign_unique_id(self._normalize_dict(credential),
trust_id)
ref = self.credential_api.create_credential(ref['id'], ref)
return CredentialV3.wrap_member(context, ref)
@staticmethod
def _blob_to_json(ref):
# credentials stored via ec2tokens before the fix for #1259584
# need json serializing, as that's the documented API format
blob = ref.get('blob')
if isinstance(blob, dict):
new_ref = ref.copy()
new_ref['blob'] = jsonutils.dumps(blob)
return new_ref
else:
return ref
@controller.filterprotected('user_id')
def list_credentials(self, context, filters):
hints = CredentialV3.build_driver_hints(context, filters)
refs = self.credential_api.list_credentials(hints)
ret_refs = [self._blob_to_json(r) for r in refs]
return CredentialV3.wrap_collection(context, ret_refs,
hints=hints)
@controller.protected()
def get_credential(self, context, credential_id):
ref = self.credential_api.get_credential(credential_id)
ret_ref = self._blob_to_json(ref)
return CredentialV3.wrap_member(context, ret_ref)
@controller.protected()
@validation.validated(schema.credential_update, 'credential')
def update_credential(self, context, credential_id, credential):
self._require_matching_id(credential_id, credential)
ref = self.credential_api.update_credential(credential_id, credential)
return CredentialV3.wrap_member(context, ref)
@controller.protected()
def delete_credential(self, context, credential_id):
return self.credential_api.delete_credential(credential_id)
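# Editorial sketch, not part of the Keystone source: how the deterministic id
# for an 'ec2' credential is derived in CredentialV3._assign_unique_id above.
# The blob values are made-up examples; the controller serializes the blob
# with jsonutils rather than the stdlib json module.
#
#   blob = {'access': 'EXAMPLE_ACCESS_KEY', 'secret': 'EXAMPLE_SECRET'}
#   credential_id = hashlib.sha256(blob['access']).hexdigest()
#   stored_blob = jsonutils.dumps(blob)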
|
apache-2.0
|
mancoast/CPythonPyc_test
|
cpython/254_test_glob.py
|
19
|
3723
|
import unittest
from test.test_support import run_unittest, TESTFN
import glob
import os
import shutil
class GlobTests(unittest.TestCase):
def norm(self, *parts):
return os.path.normpath(os.path.join(self.tempdir, *parts))
def mktemp(self, *parts):
filename = self.norm(*parts)
base, file = os.path.split(filename)
if not os.path.exists(base):
os.makedirs(base)
f = open(filename, 'w')
f.close()
def setUp(self):
self.tempdir = TESTFN+"_dir"
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
if hasattr(os, 'symlink'):
os.symlink(self.norm('broken'), self.norm('sym1'))
os.symlink(self.norm('broken'), self.norm('sym2'))
def tearDown(self):
shutil.rmtree(self.tempdir)
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
p = os.path.join(self.tempdir, pattern)
res = glob.glob(p)
self.assertEqual(list(glob.iglob(p)), res)
return res
def assertSequencesEqual_noorder(self, l1, l2):
self.assertEqual(set(l1), set(l2))
def test_glob_literal(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a'), [self.norm('a')])
eq(self.glob('a', 'D'), [self.norm('a', 'D')])
eq(self.glob('aab'), [self.norm('aab')])
eq(self.glob('zymurgy'), [])
def test_glob_one_directory(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('*q'), [])
def test_glob_nested_directory(self):
eq = self.assertSequencesEqual_noorder
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
else:
# case insensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
self.norm('a', 'bcd', 'efg')])
eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
def test_glob_directory_names(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('*', 'D'), [self.norm('a', 'D')])
eq(self.glob('*', '*a'), [])
eq(self.glob('a', '*', '*', '*a'),
[self.norm('a', 'bcd', 'efg', 'ha')])
eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'),
os.path.join('aab', 'F')]))
def test_glob_directory_with_trailing_slash(self):
        # We are verifying that a wildcard pattern which ends with os.sep
        # doesn't blow up.
res = glob.glob(self.tempdir + '*' + os.sep)
self.assertEqual(len(res), 1)
# either of these results are reasonable
self.assertTrue(res[0] in [self.tempdir, self.tempdir + os.sep])
def test_glob_broken_symlinks(self):
if hasattr(os, 'symlink'):
eq = self.assertSequencesEqual_noorder
eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2')])
eq(self.glob('sym1'), [self.norm('sym1')])
eq(self.glob('sym2'), [self.norm('sym2')])
def test_main():
run_unittest(GlobTests)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
kevalds51/sympy
|
sympy/simplify/tests/test_sqrtdenest.py
|
98
|
6451
|
from sympy import sqrt, root, S, Symbol, sqrtdenest, Integral, cos
from sympy.simplify.sqrtdenest import _subsets as subsets
r2, r3, r5, r6, r7, r10, r15, r29 = [sqrt(x) for x in [2, 3, 5, 6, 7, 10,
15, 29]]
def test_sqrtdenest():
d = {sqrt(5 + 2 * r6): r2 + r3,
sqrt(5. + 2 * r6): sqrt(5. + 2 * r6),
sqrt(5. + 4*sqrt(5 + 2 * r6)): sqrt(5.0 + 4*r2 + 4*r3),
sqrt(r2): sqrt(r2),
sqrt(5 + r7): sqrt(5 + r7),
sqrt(3 + sqrt(5 + 2*r7)):
3*r2*(5 + 2*r7)**(S(1)/4)/(2*sqrt(6 + 3*r7)) +
r2*sqrt(6 + 3*r7)/(2*(5 + 2*r7)**(S(1)/4)),
sqrt(3 + 2*r3): 3**(S(3)/4)*(r6/2 + 3*r2/2)/3}
for i in d:
assert sqrtdenest(i) == d[i]
def test_sqrtdenest2():
assert sqrtdenest(sqrt(16 - 2*r29 + 2*sqrt(55 - 10*r29))) == \
r5 + sqrt(11 - 2*r29)
e = sqrt(-r5 + sqrt(-2*r29 + 2*sqrt(-10*r29 + 55) + 16))
assert sqrtdenest(e) == root(-2*r29 + 11, 4)
r = sqrt(1 + r7)
assert sqrtdenest(sqrt(1 + r)) == sqrt(1 + r)
e = sqrt(((1 + sqrt(1 + 2*sqrt(3 + r2 + r5)))**2).expand())
assert sqrtdenest(e) == 1 + sqrt(1 + 2*sqrt(r2 + r5 + 3))
assert sqrtdenest(sqrt(5*r3 + 6*r2)) == \
sqrt(2)*root(3, 4) + root(3, 4)**3
assert sqrtdenest(sqrt(((1 + r5 + sqrt(1 + r3))**2).expand())) == \
1 + r5 + sqrt(1 + r3)
assert sqrtdenest(sqrt(((1 + r5 + r7 + sqrt(1 + r3))**2).expand())) == \
1 + sqrt(1 + r3) + r5 + r7
e = sqrt(((1 + cos(2) + cos(3) + sqrt(1 + r3))**2).expand())
assert sqrtdenest(e) == cos(3) + cos(2) + 1 + sqrt(1 + r3)
e = sqrt(-2*r10 + 2*r2*sqrt(-2*r10 + 11) + 14)
assert sqrtdenest(e) == sqrt(-2*r10 - 2*r2 + 4*r5 + 14)
# check that the result is not more complicated than the input
z = sqrt(-2*r29 + cos(2) + 2*sqrt(-10*r29 + 55) + 16)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(r6 + sqrt(15))) == sqrt(r6 + sqrt(15))
z = sqrt(15 - 2*sqrt(31) + 2*sqrt(55 - 10*r29))
assert sqrtdenest(z) == z
def test_sqrtdenest_rec():
assert sqrtdenest(sqrt(-4*sqrt(14) - 2*r6 + 4*sqrt(21) + 33)) == \
-r2 + r3 + 2*r7
assert sqrtdenest(sqrt(-28*r7 - 14*r5 + 4*sqrt(35) + 82)) == \
-7 + r5 + 2*r7
assert sqrtdenest(sqrt(6*r2/11 + 2*sqrt(22)/11 + 6*sqrt(11)/11 + 2)) == \
sqrt(11)*(r2 + 3 + sqrt(11))/11
assert sqrtdenest(sqrt(468*r3 + 3024*r2 + 2912*r6 + 19735)) == \
9*r3 + 26 + 56*r6
z = sqrt(-490*r3 - 98*sqrt(115) - 98*sqrt(345) - 2107)
assert sqrtdenest(z) == sqrt(-1)*(7*r5 + 7*r15 + 7*sqrt(23))
z = sqrt(-4*sqrt(14) - 2*r6 + 4*sqrt(21) + 34)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(-8*r2 - 2*r5 + 18)) == -r10 + 1 + r2 + r5
assert sqrtdenest(sqrt(8*r2 + 2*r5 - 18)) == \
sqrt(-1)*(-r10 + 1 + r2 + r5)
assert sqrtdenest(sqrt(8*r2/3 + 14*r5/3 + S(154)/9)) == \
-r10/3 + r2 + r5 + 3
assert sqrtdenest(sqrt(sqrt(2*r6 + 5) + sqrt(2*r7 + 8))) == \
sqrt(1 + r2 + r3 + r7)
assert sqrtdenest(sqrt(4*r15 + 8*r5 + 12*r3 + 24)) == 1 + r3 + r5 + r15
w = 1 + r2 + r3 + r5 + r7
assert sqrtdenest(sqrt((w**2).expand())) == w
z = sqrt((w**2).expand() + 1)
assert sqrtdenest(z) == z
z = sqrt(2*r10 + 6*r2 + 4*r5 + 12 + 10*r15 + 30*r3)
assert sqrtdenest(z) == z
def test_issue_6241():
z = sqrt( -320 + 32*sqrt(5) + 64*r15)
assert sqrtdenest(z) == z
def test_sqrtdenest3():
z = sqrt(13 - 2*r10 + 2*r2*sqrt(-2*r10 + 11))
assert sqrtdenest(z) == -1 + r2 + r10
assert sqrtdenest(z, max_iter=1) == -1 + sqrt(2) + sqrt(10)
n = sqrt(2*r6/7 + 2*r7/7 + 2*sqrt(42)/7 + 2)
d = sqrt(16 - 2*r29 + 2*sqrt(55 - 10*r29))
assert sqrtdenest(n/d).equals(
r7*(1 + r6 + r7)/(7*(sqrt(-2*r29 + 11) + r5)))
z = sqrt(sqrt(r2 + 2) + 2)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(-2*r10 + 4*r2*sqrt(-2*r10 + 11) + 20)) == \
sqrt(-2*r10 - 4*r2 + 8*r5 + 20)
assert sqrtdenest(sqrt((112 + 70*r2) + (46 + 34*r2)*r5)) == \
r10 + 5 + 4*r2 + 3*r5
z = sqrt(5 + sqrt(2*r6 + 5)*sqrt(-2*r29 + 2*sqrt(-10*r29 + 55) + 16))
r = sqrt(-2*r29 + 11)
assert sqrtdenest(z) == sqrt(r2*r + r3*r + r10 + r15 + 5)
def test_sqrtdenest4():
# see Denest_en.pdf in https://github.com/sympy/sympy/issues/3192
z = sqrt(8 - r2*sqrt(5 - r5) - sqrt(3)*(1 + r5))
z1 = sqrtdenest(z)
c = sqrt(-r5 + 5)
z1 = ((-r15*c - r3*c + c + r5*c - r6 - r2 + r10 + sqrt(30))/4).expand()
assert sqrtdenest(z) == z1
z = sqrt(2*r2*sqrt(r2 + 2) + 5*r2 + 4*sqrt(r2 + 2) + 8)
assert sqrtdenest(z) == r2 + sqrt(r2 + 2) + 2
w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3)
z = sqrt((w**2).expand())
assert sqrtdenest(z) == w.expand()
def test_sqrt_symbolic_denest():
x = Symbol('x')
z = sqrt(((1 + sqrt(sqrt(2 + x) + 3))**2).expand())
assert sqrtdenest(z) == sqrt((1 + sqrt(sqrt(2 + x) + 3))**2)
z = sqrt(((1 + sqrt(sqrt(2 + cos(1)) + 3))**2).expand())
assert sqrtdenest(z) == 1 + sqrt(sqrt(2 + cos(1)) + 3)
z = ((1 + cos(2))**4 + 1).expand()
assert sqrtdenest(z) == z
z = sqrt(((1 + sqrt(sqrt(2 + cos(3*x)) + 3))**2 + 1).expand())
assert sqrtdenest(z) == z
c = cos(3)
c2 = c**2
assert sqrtdenest(sqrt(2*sqrt(1 + r3)*c + c2 + 1 + r3*c2)) == \
-1 - sqrt(1 + r3)*c
ra = sqrt(1 + r3)
z = sqrt(20*ra*sqrt(3 + 3*r3) + 12*r3*ra*sqrt(3 + 3*r3) + 64*r3 + 112)
assert sqrtdenest(z) == z
def test_issue_5857():
from sympy.abc import x, y
z = sqrt(1/(4*r3 + 7) + 1)
ans = (r2 + r6)/(r3 + 2)
assert sqrtdenest(z) == ans
assert sqrtdenest(1 + z) == 1 + ans
assert sqrtdenest(Integral(z + 1, (x, 1, 2))) == \
Integral(1 + ans, (x, 1, 2))
assert sqrtdenest(x + sqrt(y)) == x + sqrt(y)
def test_subsets():
assert subsets(1) == [[1]]
assert subsets(4) == [
[1, 0, 0, 0], [0, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 0], [1, 0, 1, 0],
[0, 1, 1, 0], [1, 1, 1, 0], [0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]
def test_issue_5653():
assert sqrtdenest(
sqrt(2 + sqrt(2 + sqrt(2)))) == sqrt(2 + sqrt(2 + sqrt(2)))
|
bsd-3-clause
|
blueboxgroup/nova
|
nova/volume/encryptors/base.py
|
61
|
1949
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
from nova import keymgr
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VolumeEncryptor(object):
"""Base class to support encrypted volumes.
A VolumeEncryptor provides hooks for attaching and detaching volumes, which
are called immediately prior to attaching the volume to an instance and
immediately following detaching the volume from an instance. This class
performs no actions for either hook.
"""
def __init__(self, connection_info, **kwargs):
self._key_manager = keymgr.API()
self.encryption_key_id = kwargs.get('encryption_key_id')
def _get_key(self, context):
"""Retrieves the encryption key for the specified volume.
        :param context: the request context used to retrieve the encryption key
"""
return self._key_manager.get_key(context, self.encryption_key_id)
@abc.abstractmethod
def attach_volume(self, context, **kwargs):
"""Hook called immediately prior to attaching a volume to an instance.
"""
pass
@abc.abstractmethod
def detach_volume(self, **kwargs):
"""Hook called immediately after detaching a volume from an instance.
"""
pass
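# Editorial sketch, not part of the Nova source: a minimal concrete encryptor
# only needs to implement the two abstract hooks. The class name below is an
# invented example of a pass-through (no-op) implementation.
class _ExampleNoOpEncryptor(VolumeEncryptor):
    def attach_volume(self, context, **kwargs):
        # Nothing to set up before the volume is attached to the instance.
        pass
    def detach_volume(self, **kwargs):
        # Nothing to tear down after the volume is detached.
        pass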
|
apache-2.0
|
huggingface/pytorch-transformers
|
src/transformers/modeling_tf_outputs.py
|
2
|
42017
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional, Tuple
import tensorflow as tf
from .file_utils import ModelOutput
@dataclass
class TFBaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
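# Editorial usage sketch, not part of the library source: ModelOutput
# subclasses can be read by attribute, by string key, or by integer index;
# fields left as None are dropped when the output is converted to a tuple.
# The tensor shape below is an arbitrary assumption.
#
#   out = TFBaseModelOutput(last_hidden_state=tf.zeros((1, 5, 8)))
#   out.last_hidden_state      # attribute access
#   out["last_hidden_state"]   # dict-style access
#   out[0]                     # tuple-style access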
@dataclass
class TFBaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
This output is usually *not* a good summary of the semantic content of the input, you're often better with
averaging or pooling the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
pooler_output: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFBaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFBaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
cross_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
cross_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
cross_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFCausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFCausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
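# --- Illustrative sketch (not part of the original file) of how ``past_key_values``
# speeds up sequential decoding: feed the cache from one step into the next call so
# only the newest token has to be processed. ``model`` is a placeholder name.
#
#   out = model(input_ids, use_cache=True)
#   next_token = tf.argmax(out.logits[:, -1, :], axis=-1)[:, tf.newaxis]
#   out = model(next_token, past_key_values=out.past_key_values, use_cache=True)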
@dataclass
class TFMaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
cross_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFNextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`next_sentence_label` is provided):
Next sentence prediction loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFMultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (:obj:`tf.Tensor` of shape `(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Classification scores (before SoftMax).
`num_choices` is the second dimension of the input tensors (see `input_ids` above).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFTokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`start_positions` and :obj:`end_positions` are provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
start_logits: tf.Tensor = None
end_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
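# --- Illustrative sketch (not part of the original file): turning the QA logits above
# into a predicted answer span. ``qa_model`` and ``input_ids`` are placeholder names.
#
#   out = qa_model(input_ids)
#   start = int(tf.argmax(out.start_logits, axis=-1)[0])
#   end = int(tf.argmax(out.end_logits, axis=-1)[0])
#   answer_token_ids = input_ids[0, start:end + 1]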
@dataclass
class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
start_logits: tf.Tensor = None
end_logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
``past_key_values`` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
|
apache-2.0
|
nerzhul/ansible
|
lib/ansible/plugins/lookup/random_choice.py
|
253
|
1226
|
# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import random
from ansible.plugins.lookup import LookupBase
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
# tasks:
# - debug: msg=$item
# with_random_choice:
# - one
# - two
# - three
class LookupModule(LookupBase):
def run(self, terms, inject=None, **kwargs):
return [random.choice(terms)]
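# Usage sketch (illustrative only; written with the modern `{{ item }}` templating
# rather than the legacy `$item` shown in the comment above):
#
# - hosts: localhost
#   tasks:
#     - debug: msg="{{ item }}"
#       with_random_choice:
#         - one
#         - two
#         - three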
|
gpl-3.0
|
guoxuesong/deepstacks
|
deepstacks/lasagne/utils.py
|
1
|
2721
|
#!/usr/bin/env python
# coding:utf-8
# vi:tabstop=4:shiftwidth=4:expandtab:sts=4
import theano
import lasagne
from join import join_layer as JoinLayer
from ..utils.curry import curry
floatX = theano.config.floatX
# Build a sorted list of [name, error-output] pairs from a dict of error layers,
# optionally joining every error layer with the mask/selector layer ``m`` first.
def ordered_errors(errors, m=None, prefix='', deterministic=False):
res = []
for t in errors:
if m is None:
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), errors[t])]]
else:
tmp = map(lambda x: JoinLayer(x, m), errors[t])
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), tmp)]]
return sorted(res, key=lambda x: x[0])
# Accumulate the mean of every training error into a single scalar loss; watchpoint
# errors are evaluated for reporting only. Returns (loss, per-error means, tag slices).
def get_loss(errors, watchpoints, loss0=None):
errors = ordered_errors(errors)
watch_errors = ordered_errors(watchpoints)
errors1 = []
watch_errors1 = []
train_watch_errors1 = []
tagslice = []
count = 0
valtagslice = []
valcount = 0
for tag, errs in errors:
errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
for tag, errs in watch_errors:
if tag.startswith('train:'):
train_watch_errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
else:
watch_errors1 += errs
valtagslice += [[tag, slice(valcount, valcount+len(errs))]]
valcount += len(errs)
errors1 = [errors1]
watch_errors1 = [watch_errors1]
train_watch_errors1 = [train_watch_errors1]
loss = loss0 if loss0 is not None else 0.0
losslist = []
vallosslist = []
tmp = 0.0
for ee in errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
loss = loss+tmp
for ee in watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
vallosslist = vallosslist+[tmp]
# loss = loss+tmp
for ee in train_watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
# loss = loss+tmp
return loss, losslist, tagslice
# Split watchpoints into train-time ('train:' prefixed) and validation-time groups
# and return each group's watch values together with their tag slices.
def get_watchslice(watchpoints):
trainwatch = {}
valwatch = {}
for tag, errs in watchpoints:
if tag.startswith('train:'):
trainwatch[tag] = errs
else:
valwatch[tag] = errs
ig, train_values, train_tagslice = get_loss(trainwatch, [])
ig, val_values, val_tagslice = get_loss(valwatch, [])
return train_values, train_tagslice, val_values, val_tagslice
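# Usage sketch (illustrative; ``network``, ``network_errors`` and ``watchpoints`` are
# placeholder names for the dict-of-error-layers structures this module expects):
#
#   loss, losslist, tagslice = get_loss(network_errors, watchpoints)
#   params = lasagne.layers.get_all_params(network, trainable=True)
#   updates = lasagne.updates.adam(loss, params)
#   # tagslice pairs each tag with the slice of ``losslist`` holding its per-error
#   # means, e.g. dict((tag, losslist[sl]) for tag, sl in tagslice)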
|
mit
|
michelts/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/admin_changelist/models.py
|
51
|
1353
|
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False, null=True)
name = models.CharField(max_length=30, blank=True)
class Genre(models.Model):
name = models.CharField(max_length=20)
class Band(models.Model):
name = models.CharField(max_length=20)
nr_of_members = models.PositiveIntegerField()
genres = models.ManyToManyField(Genre)
class Musician(models.Model):
name = models.CharField(max_length=30)
def __unicode__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(Musician, through='Membership')
def __unicode__(self):
return self.name
class Membership(models.Model):
music = models.ForeignKey(Musician)
group = models.ForeignKey(Group)
role = models.CharField(max_length=15)
class Quartet(Group):
pass
class ChordsMusician(Musician):
pass
class ChordsBand(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
player = models.ForeignKey(ChordsMusician)
band = models.ForeignKey(ChordsBand)
instrument = models.CharField(max_length=15)
|
gpl-3.0
|
DevynCJohnson/Pybooster
|
pylib/convvolume.py
|
1
|
120739
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Volume measurement conversions.
@file convvolume.py
@package pybooster.convvolume
@version 2020.08.08
@author Devyn Collier Johnson <DevynCJohnson@Gmail.com>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from typing import Union
__all__: list = [
# CANADIAN CUPS #
r'cancup2cubicinch',
r'cancup2cubicmeter',
r'cancup2liter',
r'cancup2metriccup',
r'cancup2metrictablespoon',
r'cancup2metricteaspoon',
r'cancup2ukfluidounce',
r'cancup2ukgallon',
r'cancup2ukgill',
r'cancup2ukpint',
r'cancup2ukquart',
r'cancup2uktablespoon',
r'cancup2ukteaspoon',
r'cancup2uscup',
r'cancup2usdrygallon',
r'cancup2usdrypint',
r'cancup2usdryquart',
r'cancup2usfluidounce',
r'cancup2usgill',
r'cancup2usliquidgallon',
r'cancup2usliquidpint',
r'cancup2usliquidquart',
r'cancup2ustablespoon',
r'cancup2usteaspoon',
# CUBIC INCHES #
r'cubicinch2cancup',
r'cubicinch2cubicmeter',
r'cubicinch2liter',
r'cubicinch2metriccup',
r'cubicinch2metrictablespoon',
r'cubicinch2metricteaspoon',
r'cubicinch2ukfluidounce',
r'cubicinch2ukgallon',
r'cubicinch2ukgill',
r'cubicinch2ukpint',
r'cubicinch2ukquart',
r'cubicinch2uktablespoon',
r'cubicinch2ukteaspoon',
r'cubicinch2uscup',
r'cubicinch2usdrygallon',
r'cubicinch2usdrypint',
r'cubicinch2usdryquart',
r'cubicinch2usfluidounce',
r'cubicinch2usgill',
r'cubicinch2usliquidgallon',
r'cubicinch2usliquidpint',
r'cubicinch2usliquidquart',
r'cubicinch2ustablespoon',
r'cubicinch2usteaspoon',
# CUBIC METERS #
r'cubicmeter2cancup',
r'cubicmeter2cubicinch',
r'cubicmeter2liter',
r'cubicmeter2metriccup',
r'cubicmeter2metrictablespoon',
r'cubicmeter2metricteaspoon',
r'cubicmeter2ukfluidounce',
r'cubicmeter2ukgallon',
r'cubicmeter2ukgill',
r'cubicmeter2ukpint',
r'cubicmeter2ukquart',
r'cubicmeter2uktablespoon',
r'cubicmeter2ukteaspoon',
r'cubicmeter2uscup',
r'cubicmeter2usdrygallon',
r'cubicmeter2usdrypint',
r'cubicmeter2usdryquart',
r'cubicmeter2usfluidounce',
r'cubicmeter2usgill',
r'cubicmeter2usliquidgallon',
r'cubicmeter2usliquidpint',
r'cubicmeter2usliquidquart',
r'cubicmeter2ustablespoon',
r'cubicmeter2usteaspoon',
# LITERS #
r'liter2cancup',
r'liter2cubicinch',
r'liter2cubicmeter',
r'liter2metriccup',
r'liter2metrictablespoon',
r'liter2metricteaspoon',
r'liter2ukfluidounce',
r'liter2ukgallon',
r'liter2ukgill',
r'liter2ukpint',
r'liter2ukquart',
r'liter2uktablespoon',
r'liter2ukteaspoon',
r'liter2uscup',
r'liter2usdrygallon',
r'liter2usdrypint',
r'liter2usdryquart',
r'liter2usfluidounce',
r'liter2usgill',
r'liter2usliquidgallon',
r'liter2usliquidpint',
r'liter2usliquidquart',
r'liter2ustablespoon',
r'liter2usteaspoon',
# METRIC CUPS #
r'metriccup2cancup',
r'metriccup2cubicinch',
r'metriccup2cubicmeter',
r'metriccup2liter',
r'metriccup2metrictablespoon',
r'metriccup2metricteaspoon',
r'metriccup2ukfluidounce',
r'metriccup2ukgallon',
r'metriccup2ukgill',
r'metriccup2ukpint',
r'metriccup2ukquart',
r'metriccup2uktablespoon',
r'metriccup2ukteaspoon',
r'metriccup2uscup',
r'metriccup2usdrygallon',
r'metriccup2usdrypint',
r'metriccup2usdryquart',
r'metriccup2usfluidounce',
r'metriccup2usgill',
r'metriccup2usliquidgallon',
r'metriccup2usliquidpint',
r'metriccup2usliquidquart',
r'metriccup2ustablespoon',
r'metriccup2usteaspoon',
# METRIC TABLESPOONS #
r'metrictablespoon2cancup',
r'metrictablespoon2cubicinch',
r'metrictablespoon2cubicmeter',
r'metrictablespoon2liter',
r'metrictablespoon2metriccup',
r'metrictablespoon2metricteaspoon',
r'metrictablespoon2ukfluidounce',
r'metrictablespoon2ukgallon',
r'metrictablespoon2ukgill',
r'metrictablespoon2ukpint',
r'metrictablespoon2ukquart',
r'metrictablespoon2uktablespoon',
r'metrictablespoon2ukteaspoon',
r'metrictablespoon2uscup',
r'metrictablespoon2usdrygallon',
r'metrictablespoon2usdrypint',
r'metrictablespoon2usdryquart',
r'metrictablespoon2usfluidounce',
r'metrictablespoon2usgill',
r'metrictablespoon2usliquidgallon',
r'metrictablespoon2usliquidpint',
r'metrictablespoon2usliquidquart',
r'metrictablespoon2ustablespoon',
r'metrictablespoon2usteaspoon',
# METRIC TEASPOONS #
r'metricteaspoon2cancup',
r'metricteaspoon2cubicinch',
r'metricteaspoon2cubicmeter',
r'metricteaspoon2liter',
r'metricteaspoon2metriccup',
r'metricteaspoon2metrictablespoon',
r'metricteaspoon2ukfluidounce',
r'metricteaspoon2ukgallon',
r'metricteaspoon2ukgill',
r'metricteaspoon2ukpint',
r'metricteaspoon2ukquart',
r'metricteaspoon2uktablespoon',
r'metricteaspoon2ukteaspoon',
r'metricteaspoon2uscup',
r'metricteaspoon2usdrygallon',
r'metricteaspoon2usdrypint',
r'metricteaspoon2usdryquart',
r'metricteaspoon2usfluidounce',
r'metricteaspoon2usgill',
r'metricteaspoon2usliquidgallon',
r'metricteaspoon2usliquidpint',
r'metricteaspoon2usliquidquart',
r'metricteaspoon2ustablespoon',
r'metricteaspoon2usteaspoon',
# UK FLUID OUNCES #
r'ukfluidounce2cancup',
r'ukfluidounce2cubicinch',
r'ukfluidounce2cubicmeter',
r'ukfluidounce2liter',
r'ukfluidounce2metriccup',
r'ukfluidounce2metrictablespoon',
r'ukfluidounce2metricteaspoon',
r'ukfluidounce2ukgallon',
r'ukfluidounce2ukgill',
r'ukfluidounce2ukpint',
r'ukfluidounce2ukquart',
r'ukfluidounce2uktablespoon',
r'ukfluidounce2ukteaspoon',
r'ukfluidounce2uscup',
r'ukfluidounce2usdrygallon',
r'ukfluidounce2usdrypint',
r'ukfluidounce2usdryquart',
r'ukfluidounce2usfluidounce',
r'ukfluidounce2usgill',
r'ukfluidounce2usliquidgallon',
r'ukfluidounce2usliquidpint',
r'ukfluidounce2usliquidquart',
r'ukfluidounce2ustablespoon',
r'ukfluidounce2usteaspoon',
# UK GALLONS #
r'ukgallon2cancup',
r'ukgallon2cubicinch',
r'ukgallon2cubicmeter',
r'ukgallon2liter',
r'ukgallon2metriccup',
r'ukgallon2metrictablespoon',
r'ukgallon2metricteaspoon',
r'ukgallon2ukfluidounce',
r'ukgallon2ukgill',
r'ukgallon2ukpint',
r'ukgallon2ukquart',
r'ukgallon2uktablespoon',
r'ukgallon2ukteaspoon',
r'ukgallon2uscup',
r'ukgallon2usdrygallon',
r'ukgallon2usdrypint',
r'ukgallon2usdryquart',
r'ukgallon2usfluidounce',
r'ukgallon2usgill',
r'ukgallon2usliquidgallon',
r'ukgallon2usliquidpint',
r'ukgallon2usliquidquart',
r'ukgallon2ustablespoon',
r'ukgallon2usteaspoon',
# UK GILLS #
r'ukgill2cancup',
r'ukgill2cubicinch',
r'ukgill2cubicmeter',
r'ukgill2liter',
r'ukgill2metriccup',
r'ukgill2metrictablespoon',
r'ukgill2metricteaspoon',
r'ukgill2ukfluidounce',
r'ukgill2ukgallon',
r'ukgill2ukpint',
r'ukgill2ukquart',
r'ukgill2uktablespoon',
r'ukgill2ukteaspoon',
r'ukgill2uscup',
r'ukgill2usdrygallon',
r'ukgill2usdrypint',
r'ukgill2usdryquart',
r'ukgill2usfluidounce',
r'ukgill2usgill',
r'ukgill2usliquidgallon',
r'ukgill2usliquidpint',
r'ukgill2usliquidquart',
r'ukgill2ustablespoon',
r'ukgill2usteaspoon',
# UK PINTS #
r'ukpint2cancup',
r'ukpint2cubicinch',
r'ukpint2cubicmeter',
r'ukpint2liter',
r'ukpint2metriccup',
r'ukpint2metrictablespoon',
r'ukpint2metricteaspoon',
r'ukpint2ukfluidounce',
r'ukpint2ukgallon',
r'ukpint2ukgill',
r'ukpint2ukquart',
r'ukpint2uktablespoon',
r'ukpint2ukteaspoon',
r'ukpint2uscup',
r'ukpint2usdrygallon',
r'ukpint2usdrypint',
r'ukpint2usdryquart',
r'ukpint2usfluidounce',
r'ukpint2usgill',
r'ukpint2usliquidgallon',
r'ukpint2usliquidpint',
r'ukpint2usliquidquart',
r'ukpint2ustablespoon',
r'ukpint2usteaspoon',
# UK QUARTS #
r'ukquart2cancup',
r'ukquart2cubicinch',
r'ukquart2cubicmeter',
r'ukquart2liter',
r'ukquart2metriccup',
r'ukquart2metrictablespoon',
r'ukquart2metricteaspoon',
r'ukquart2ukfluidounce',
r'ukquart2ukgallon',
r'ukquart2ukgill',
r'ukquart2ukpint',
r'ukquart2uktablespoon',
r'ukquart2ukteaspoon',
r'ukquart2uscup',
r'ukquart2usdrygallon',
r'ukquart2usdrypint',
r'ukquart2usdryquart',
r'ukquart2usfluidounce',
r'ukquart2usgill',
r'ukquart2usliquidgallon',
r'ukquart2usliquidpint',
r'ukquart2usliquidquart',
r'ukquart2ustablespoon',
r'ukquart2usteaspoon',
# UK TABLESPOONS #
r'uktablespoon2cancup',
r'uktablespoon2cubicinch',
r'uktablespoon2cubicmeter',
r'uktablespoon2liter',
r'uktablespoon2metriccup',
r'uktablespoon2metrictablespoon',
r'uktablespoon2metricteaspoon',
r'uktablespoon2ukfluidounce',
r'uktablespoon2ukgallon',
r'uktablespoon2ukgill',
r'uktablespoon2ukpint',
r'uktablespoon2ukquart',
r'uktablespoon2ukteaspoon',
r'uktablespoon2uscup',
r'uktablespoon2usdrygallon',
r'uktablespoon2usdrypint',
r'uktablespoon2usdryquart',
r'uktablespoon2usfluidounce',
r'uktablespoon2usgill',
r'uktablespoon2usliquidgallon',
r'uktablespoon2usliquidpint',
r'uktablespoon2usliquidquart',
r'uktablespoon2ustablespoon',
r'uktablespoon2usteaspoon',
# UK TEASPOONS #
r'ukteaspoon2cancup',
r'ukteaspoon2cubicinch',
r'ukteaspoon2cubicmeter',
r'ukteaspoon2liter',
r'ukteaspoon2metriccup',
r'ukteaspoon2metrictablespoon',
r'ukteaspoon2metricteaspoon',
r'ukteaspoon2ukfluidounce',
r'ukteaspoon2ukgallon',
r'ukteaspoon2ukgill',
r'ukteaspoon2ukpint',
r'ukteaspoon2ukquart',
r'ukteaspoon2uktablespoon',
r'ukteaspoon2uscup',
r'ukteaspoon2usdrygallon',
r'ukteaspoon2usdrypint',
r'ukteaspoon2usdryquart',
r'ukteaspoon2usfluidounce',
r'ukteaspoon2usgill',
r'ukteaspoon2usliquidgallon',
r'ukteaspoon2usliquidpint',
r'ukteaspoon2usliquidquart',
r'ukteaspoon2ustablespoon',
r'ukteaspoon2usteaspoon',
# US CUPS #
r'uscup2cancup',
r'uscup2cubicinch',
r'uscup2cubicmeter',
r'uscup2liter',
r'uscup2metriccup',
r'uscup2metrictablespoon',
r'uscup2metricteaspoon',
r'uscup2ukfluidounce',
r'uscup2ukgallon',
r'uscup2ukgill',
r'uscup2ukpint',
r'uscup2ukquart',
r'uscup2uktablespoon',
r'uscup2ukteaspoon',
r'uscup2usdrygallon',
r'uscup2usdrypint',
r'uscup2usdryquart',
r'uscup2usfluidounce',
r'uscup2usgill',
r'uscup2usliquidgallon',
r'uscup2usliquidpint',
r'uscup2usliquidquart',
r'uscup2ustablespoon',
r'uscup2usteaspoon',
# US DRY GALLONS #
r'usdrygallon2cancup',
r'usdrygallon2cubicinch',
r'usdrygallon2cubicmeter',
r'usdrygallon2liter',
r'usdrygallon2metriccup',
r'usdrygallon2metrictablespoon',
r'usdrygallon2metricteaspoon',
r'usdrygallon2ukfluidounce',
r'usdrygallon2ukgallon',
r'usdrygallon2ukgill',
r'usdrygallon2ukpint',
r'usdrygallon2ukquart',
r'usdrygallon2uktablespoon',
r'usdrygallon2ukteaspoon',
r'usdrygallon2uscup',
r'usdrygallon2usdrypint',
r'usdrygallon2usdryquart',
r'usdrygallon2usfluidounce',
r'usdrygallon2usgill',
r'usdrygallon2usliquidgallon',
r'usdrygallon2usliquidpint',
r'usdrygallon2usliquidquart',
r'usdrygallon2ustablespoon',
r'usdrygallon2usteaspoon',
# US DRY PINTS #
r'usdrypint2cancup',
r'usdrypint2cubicinch',
r'usdrypint2cubicmeter',
r'usdrypint2liter',
r'usdrypint2metriccup',
r'usdrypint2metrictablespoon',
r'usdrypint2metricteaspoon',
r'usdrypint2ukfluidounce',
r'usdrypint2ukgallon',
r'usdrypint2ukgill',
r'usdrypint2ukpint',
r'usdrypint2ukquart',
r'usdrypint2uktablespoon',
r'usdrypint2ukteaspoon',
r'usdrypint2uscup',
r'usdrypint2usdrygallon',
r'usdrypint2usdryquart',
r'usdrypint2usfluidounce',
r'usdrypint2usgill',
r'usdrypint2usliquidgallon',
r'usdrypint2usliquidpint',
r'usdrypint2usliquidquart',
r'usdrypint2ustablespoon',
r'usdrypint2usteaspoon',
# US DRY QUARTS #
r'usdryquart2cancup',
r'usdryquart2cubicinch',
r'usdryquart2cubicmeter',
r'usdryquart2liter',
r'usdryquart2metriccup',
r'usdryquart2metrictablespoon',
r'usdryquart2metricteaspoon',
r'usdryquart2ukfluidounce',
r'usdryquart2ukgallon',
r'usdryquart2ukgill',
r'usdryquart2ukpint',
r'usdryquart2ukquart',
r'usdryquart2uktablespoon',
r'usdryquart2ukteaspoon',
r'usdryquart2uscup',
r'usdryquart2usdrygallon',
r'usdryquart2usdrypint',
r'usdryquart2usfluidounce',
r'usdryquart2usgill',
r'usdryquart2usliquidgallon',
r'usdryquart2usliquidpint',
r'usdryquart2usliquidquart',
r'usdryquart2ustablespoon',
r'usdryquart2usteaspoon',
# US FLUID OUNCES #
r'usfluidounce2cancup',
r'usfluidounce2cubicinch',
r'usfluidounce2cubicmeter',
r'usfluidounce2liter',
r'usfluidounce2metriccup',
r'usfluidounce2metrictablespoon',
r'usfluidounce2metricteaspoon',
r'usfluidounce2ukfluidounce',
r'usfluidounce2ukgallon',
r'usfluidounce2ukgill',
r'usfluidounce2ukpint',
r'usfluidounce2ukquart',
r'usfluidounce2uktablespoon',
r'usfluidounce2ukteaspoon',
r'usfluidounce2uscup',
r'usfluidounce2usdrygallon',
r'usfluidounce2usdrypint',
r'usfluidounce2usdryquart',
r'usfluidounce2usgill',
r'usfluidounce2usliquidgallon',
r'usfluidounce2usliquidpint',
r'usfluidounce2usliquidquart',
r'usfluidounce2ustablespoon',
r'usfluidounce2usteaspoon',
# US GILLS #
r'usgill2cancup',
r'usgill2cubicinch',
r'usgill2cubicmeter',
r'usgill2liter',
r'usgill2metriccup',
r'usgill2metrictablespoon',
r'usgill2metricteaspoon',
r'usgill2ukfluidounce',
r'usgill2ukgallon',
r'usgill2ukgill',
r'usgill2ukpint',
r'usgill2ukquart',
r'usgill2uktablespoon',
r'usgill2ukteaspoon',
r'usgill2uscup',
r'usgill2usdrygallon',
r'usgill2usdrypint',
r'usgill2usdryquart',
r'usgill2usfluidounce',
r'usgill2usliquidgallon',
r'usgill2usliquidpint',
r'usgill2usliquidquart',
r'usgill2ustablespoon',
r'usgill2usteaspoon',
# US LIQUID GALLONS #
r'usliquidgallon2cancup',
r'usliquidgallon2cubicinch',
r'usliquidgallon2cubicmeter',
r'usliquidgallon2liter',
r'usliquidgallon2metriccup',
r'usliquidgallon2metrictablespoon',
r'usliquidgallon2metricteaspoon',
r'usliquidgallon2ukfluidounce',
r'usliquidgallon2ukgallon',
r'usliquidgallon2ukgill',
r'usliquidgallon2ukpint',
r'usliquidgallon2ukquart',
r'usliquidgallon2uktablespoon',
r'usliquidgallon2ukteaspoon',
r'usliquidgallon2uscup',
r'usliquidgallon2usdrygallon',
r'usliquidgallon2usdrypint',
r'usliquidgallon2usdryquart',
r'usliquidgallon2usfluidounce',
r'usliquidgallon2usgill',
r'usliquidgallon2usliquidpint',
r'usliquidgallon2usliquidquart',
r'usliquidgallon2ustablespoon',
r'usliquidgallon2usteaspoon',
# US LIQUID PINTS #
r'usliquidpint2cancup',
r'usliquidpint2cubicinch',
r'usliquidpint2cubicmeter',
r'usliquidpint2liter',
r'usliquidpint2metriccup',
r'usliquidpint2metrictablespoon',
r'usliquidpint2metricteaspoon',
r'usliquidpint2ukfluidounce',
r'usliquidpint2ukgallon',
r'usliquidpint2ukgill',
r'usliquidpint2ukpint',
r'usliquidpint2ukquart',
r'usliquidpint2uktablespoon',
r'usliquidpint2ukteaspoon',
r'usliquidpint2uscup',
r'usliquidpint2usdrygallon',
r'usliquidpint2usdrypint',
r'usliquidpint2usdryquart',
r'usliquidpint2usfluidounce',
r'usliquidpint2usgill',
r'usliquidpint2usliquidgallon',
r'usliquidpint2usliquidquart',
r'usliquidpint2ustablespoon',
r'usliquidpint2usteaspoon',
# US LIQUID QUARTS #
r'usliquidquart2cancup',
r'usliquidquart2cubicinch',
r'usliquidquart2cubicmeter',
r'usliquidquart2liter',
r'usliquidquart2metriccup',
r'usliquidquart2metrictablespoon',
r'usliquidquart2metricteaspoon',
r'usliquidquart2ukfluidounce',
r'usliquidquart2ukgallon',
r'usliquidquart2ukgill',
r'usliquidquart2ukpint',
r'usliquidquart2ukquart',
r'usliquidquart2uktablespoon',
r'usliquidquart2ukteaspoon',
r'usliquidquart2uscup',
r'usliquidquart2usdrygallon',
r'usliquidquart2usdrypint',
r'usliquidquart2usdryquart',
r'usliquidquart2usfluidounce',
r'usliquidquart2usgill',
r'usliquidquart2usliquidgallon',
r'usliquidquart2usliquidpint',
r'usliquidquart2ustablespoon',
r'usliquidquart2usteaspoon',
# US TABLESPOONS #
r'ustablespoon2cancup',
r'ustablespoon2cubicinch',
r'ustablespoon2cubicmeter',
r'ustablespoon2liter',
r'ustablespoon2metriccup',
r'ustablespoon2metrictablespoon',
r'ustablespoon2metricteaspoon',
r'ustablespoon2ukfluidounce',
r'ustablespoon2ukgallon',
r'ustablespoon2ukgill',
r'ustablespoon2ukpint',
r'ustablespoon2ukquart',
r'ustablespoon2uktablespoon',
r'ustablespoon2ukteaspoon',
r'ustablespoon2uscup',
r'ustablespoon2usdrygallon',
r'ustablespoon2usdrypint',
r'ustablespoon2usdryquart',
r'ustablespoon2usfluidounce',
r'ustablespoon2usgill',
r'ustablespoon2usliquidgallon',
r'ustablespoon2usliquidpint',
r'ustablespoon2usliquidquart',
r'ustablespoon2usteaspoon',
# US TEASPOONS #
r'usteaspoon2cancup',
r'usteaspoon2cubicinch',
r'usteaspoon2cubicmeter',
r'usteaspoon2liter',
r'usteaspoon2metriccup',
r'usteaspoon2metrictablespoon',
r'usteaspoon2metricteaspoon',
r'usteaspoon2ukfluidounce',
r'usteaspoon2ukgallon',
r'usteaspoon2ukgill',
r'usteaspoon2ukpint',
r'usteaspoon2ukquart',
r'usteaspoon2uktablespoon',
r'usteaspoon2ukteaspoon',
r'usteaspoon2uscup',
r'usteaspoon2usdrygallon',
r'usteaspoon2usdrypint',
r'usteaspoon2usdryquart',
r'usteaspoon2usfluidounce',
r'usteaspoon2usgill',
r'usteaspoon2usliquidgallon',
r'usteaspoon2usliquidpint',
r'usteaspoon2usliquidquart',
r'usteaspoon2ustablespoon'
]
# CANADIAN CUPS #
def cancup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Inches."""
return round(_volume * 13.871, _round)
def cancup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Meters."""
return round(_volume * 0.00022730372323498659, _round)
def cancup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Liters."""
return round(_volume * 0.22730372323498658908, _round)
def cancup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Cups."""
return round(_volume * 0.90922, _round)
def cancup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Tablespoons."""
return round(_volume * 15.154, _round)
def cancup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Teaspoons."""
return round(_volume * 45.461, _round)
def cancup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Fluid Ounces."""
return round(_volume * 8.0, _round)
def cancup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gallons."""
return round(_volume * 0.050000, _round)
def cancup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gills."""
return round(_volume * 1.6000, _round)
def cancup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Pints."""
return round(_volume * 0.40000, _round)
def cancup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Quarts."""
return round(_volume * 0.20000, _round)
def cancup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Tablespoons."""
return round(_volume * 16.0, _round)
def cancup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Teaspoons."""
return round(_volume * 64.0, _round)
def cancup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Cups."""
return round(_volume * 0.96076, _round)
def cancup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Gallons."""
return round(_volume * 0.051603, _round)
def cancup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Pints."""
return round(_volume * 0.41282, _round)
def cancup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Quarts."""
return round(_volume * 0.20641, _round)
def cancup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Fluid Ounces."""
return round(_volume * 7.6861, _round)
def cancup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Gills."""
return round(_volume * 1.9215, _round)
def cancup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Gallons."""
return round(_volume * 0.060047, _round)
def cancup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Pints."""
return round(_volume * 0.48038, _round)
def cancup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Quarts."""
return round(_volume * 0.24019, _round)
def cancup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Tablespoons."""
return round(_volume * 15.372, _round)
def cancup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Teaspoons."""
return round(_volume * 46.116, _round)
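# Quick usage sketch (added for illustration; the values follow directly from the
# conversion constants defined above):
#   >>> cancup2liter(2)
#   0.455
#   >>> cancup2ukfluidounce(1)
#   8.0
#   >>> cubicinch2cancup(cancup2cubicinch(1))   # round trip via the group below
#   1.0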
# CUBIC INCHES #
def cubicinch2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Canadian Cups."""
return round(_volume * 0.072093, _round)
def cubicinch2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Cubic Meters."""
return round(_volume * 0.000016386995, _round)
def cubicinch2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Liters."""
return round(_volume * 0.016386995, _round)
def cubicinch2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Cups."""
return round(_volume * 0.065548, _round)
def cubicinch2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Tablespoons."""
return round(_volume * 1.0925, _round)
def cubicinch2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Teaspoons."""
return round(_volume * 3.2774, _round)
def cubicinch2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Fluid Ounces."""
return round(_volume * 0.57674, _round)
def cubicinch2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gallons."""
return round(_volume * 0.0036047, _round)
def cubicinch2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gills."""
return round(_volume * 0.11535, _round)
def cubicinch2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Pints."""
return round(_volume * 0.028837, _round)
def cubicinch2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Quarts."""
return round(_volume * 0.014419, _round)
def cubicinch2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Tablespoons."""
return round(_volume * 1.1535, _round)
def cubicinch2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Teaspoons."""
return round(_volume * 4.6139, _round)
def cubicinch2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Cups."""
return round(_volume * 0.069264, _round)
def cubicinch2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Gallons."""
return round(_volume * 0.0037202, _round)
def cubicinch2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Pints."""
return round(_volume * 0.029762, _round)
def cubicinch2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Quarts."""
return round(_volume * 0.014881, _round)
def cubicinch2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Fluid Ounces."""
return round(_volume * 0.55411, _round)
def cubicinch2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Gills."""
return round(_volume * 0.13853, _round)
def cubicinch2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Gallons."""
return round(_volume * 0.0043290, _round)
def cubicinch2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Pints."""
return round(_volume * 0.034632, _round)
def cubicinch2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Quarts."""
return round(_volume * 0.017316, _round)
def cubicinch2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Tablespoons."""
return round(_volume * 1.1082, _round)
def cubicinch2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Teaspoons."""
return round(_volume * 3.3247, _round)
# CUBIC METERS #
def cubicmeter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Canadian Cups."""
return round(_volume * 4399.4, _round)
def cubicmeter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Cubic Inches."""
return round(_volume * 61024.0, _round)
def cubicmeter2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Liters."""
return round(_volume * 1000.0, _round)
def cubicmeter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Cups."""
return round(_volume * 4000.0, _round)
def cubicmeter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Tablespoons."""
return round(_volume * 66667.0, _round)
def cubicmeter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Teaspoons."""
return round(_volume * 200000.0, _round)
def cubicmeter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Fluid Ounces."""
return round(_volume * 35195.0, _round)
def cubicmeter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gallons."""
return round(_volume * 219.97, _round)
def cubicmeter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gills."""
return round(_volume * 7039.0, _round)
def cubicmeter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Pints."""
return round(_volume * 1759.8, _round)
def cubicmeter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Quarts."""
return round(_volume * 879.88, _round)
def cubicmeter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Tablespoons."""
return round(_volume * 70390.0, _round)
def cubicmeter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Teaspoons."""
return round(_volume * 281560.0, _round)
def cubicmeter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Cups."""
return round(_volume * 4226.8, _round)
def cubicmeter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Gallons."""
return round(_volume * 227.02, _round)
def cubicmeter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Pints."""
return round(_volume * 1816.2, _round)
def cubicmeter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Quarts."""
return round(_volume * 908.08, _round)
def cubicmeter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
    """Cubic Meters -> US Fluid Ounces."""
    return round(_volume * 33814.0, _round)
def cubicmeter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Gills."""
return round(_volume * 8453.5, _round)
def cubicmeter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Gallons."""
return round(_volume * 264.17, _round)
def cubicmeter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Pints."""
return round(_volume * 2113.4, _round)
def cubicmeter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Quarts."""
return round(_volume * 1056.7, _round)
def cubicmeter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Tablespoons."""
return round(_volume * 67628.0, _round)
def cubicmeter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Teaspoons."""
return round(_volume * 202880.0, _round)
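# Illustrative usage of the cubic-meter helpers above (a minimal sketch; the
# results follow from the factors listed in this block):
#   cubicmeter2liter(2)      -> 2000.0
#   cubicmeter2ukgallon(1)   -> 219.97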
# LITERS #
def liter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Canadian Cups."""
return round(_volume * 4.3994, _round)
def liter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Inches."""
return round(_volume * 61.024, _round)
def liter2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Meters."""
return round(_volume * 0.001, _round)
def liter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Cups."""
return round(_volume * 4.0, _round)
def liter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Tablespoons."""
return round(_volume * 66.667, _round)
def liter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Teaspoons."""
return round(_volume * 200.0, _round)
def liter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Fluid Ounces."""
return round(_volume * 35.195, _round)
def liter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gallons."""
return round(_volume * 0.21997, _round)
def liter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gills."""
return round(_volume * 7.039, _round)
def liter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Pints."""
return round(_volume * 1.7598, _round)
def liter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Quarts."""
return round(_volume * 0.87988, _round)
def liter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Tablespoons."""
return round(_volume * 70.39, _round)
def liter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Teaspoons."""
return round(_volume * 281.560, _round)
def liter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Cups."""
return round(_volume * 4.2268, _round)
def liter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Gallons."""
return round(_volume * 0.22702, _round)
def liter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Pints."""
return round(_volume * 1.8162, _round)
def liter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Quarts."""
return round(_volume * 0.90808, _round)
def liter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Fluid Ounces."""
return round(_volume * 33.814, _round)
def liter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Gills."""
return round(_volume * 8.4535, _round)
def liter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Gallons."""
return round(_volume * 0.26417, _round)
def liter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Pints."""
return round(_volume * 2.1134, _round)
def liter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Quarts."""
return round(_volume * 1.0567, _round)
def liter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Tablespoons."""
return round(_volume * 67.628, _round)
def liter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Teaspoons."""
return round(_volume * 202.8800, _round)
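# Quick sanity checks for the liter helpers above (illustrative only):
#   liter2metriccup(1)         -> 4.0
#   liter2usliquidgallon(10)   -> 2.642   (10 * 0.26417, rounded to 3 places)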
# METRIC CUPS #
def metriccup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Canadian Cups."""
return round(_volume * 1.0998, _round)
def metriccup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Inches."""
return round(_volume * 15.256, _round)
def metriccup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Meters."""
return round(_volume * 0.00025, _round)
def metriccup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Liters."""
return round(_volume * 0.250, _round)
def metriccup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Tablespoons."""
return round(_volume * 16.667, _round)
def metriccup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Teaspoons."""
return round(_volume * 50.0, _round)
def metriccup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Fluid Ounces."""
return round(_volume * 8.7988, _round)
def metriccup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gallons."""
return round(_volume * 0.054992, _round)
def metriccup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gills."""
return round(_volume * 1.7598, _round)
def metriccup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Pints."""
return round(_volume * 0.43994, _round)
def metriccup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Quarts."""
return round(_volume * 0.21997, _round)
def metriccup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Tablespoons."""
return round(_volume * 17.598, _round)
def metriccup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Teaspoons."""
return round(_volume * 70.390, _round)
def metriccup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Cups."""
return round(_volume * 1.0567, _round)
def metriccup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Gallons."""
return round(_volume * 0.056755, _round)
def metriccup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Pints."""
return round(_volume * 0.45404, _round)
def metriccup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Quarts."""
return round(_volume * 0.22702, _round)
def metriccup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Fluid Ounces."""
return round(_volume * 8.4535, _round)
def metriccup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Gills."""
return round(_volume * 2.1134, _round)
def metriccup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Gallons."""
return round(_volume * 0.066043, _round)
def metriccup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Pints."""
return round(_volume * 0.52834, _round)
def metriccup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Quarts."""
return round(_volume * 0.26417, _round)
def metriccup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Tablespoons."""
return round(_volume * 16.907, _round)
def metriccup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Teaspoons."""
return round(_volume * 50.721, _round)
# METRIC TABLESPOONS #
def metrictablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Canadian Cups."""
return round(_volume * 0.065991, _round)
def metrictablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Inches."""
return round(_volume * 0.91536, _round)
def metrictablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Meters."""
return round(_volume * 0.000014999925000375, _round)
def metrictablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Liters."""
return round(_volume * 0.01499992500037499813, _round)
def metrictablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Cups."""
return round(_volume * 0.060, _round)
def metrictablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Teaspoons."""
return round(_volume * 3.0, _round)
def metrictablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52793, _round)
def metrictablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gallons."""
return round(_volume * 0.0032995, _round)
def metrictablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gills."""
return round(_volume * 0.10559, _round)
def metrictablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Pints."""
return round(_volume * 0.026396, _round)
def metrictablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Quarts."""
return round(_volume * 0.013198, _round)
def metrictablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0559, _round)
def metrictablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Teaspoons."""
return round(_volume * 4.2234, _round)
def metrictablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Cups."""
return round(_volume * 0.063401, _round)
def metrictablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0034053, _round)
def metrictablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Pints."""
return round(_volume * 0.027242, _round)
def metrictablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013621, _round)
def metrictablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50721, _round)
def metrictablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Gills."""
return round(_volume * 0.12680, _round)
def metrictablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039626, _round)
def metrictablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031701, _round)
def metrictablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015850, _round)
def metrictablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Tablespoons."""
return round(_volume * 1.0144, _round)
def metrictablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Teaspoons."""
return round(_volume * 3.0433, _round)
# METRIC TEASPOONS #
def metricteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Canadian Cups."""
return round(_volume * 0.021997, _round)
def metricteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Inches."""
return round(_volume * 0.30512, _round)
def metricteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Meters."""
return round(_volume * 0.000005, _round)
def metricteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Liters."""
return round(_volume * 0.005, _round)
def metricteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Cups."""
return round(_volume * 0.020000, _round)
def metricteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.33333333, _round)
def metricteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17598, _round)
def metricteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gallons."""
return round(_volume * 0.0010998, _round)
def metricteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gills."""
return round(_volume * 0.035195, _round)
def metricteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Pints."""
return round(_volume * 0.0087988, _round)
def metricteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Quarts."""
return round(_volume * 0.0043994, _round)
def metricteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Tablespoons."""
return round(_volume * 0.35195, _round)
def metricteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Teaspoons."""
return round(_volume * 1.4078, _round)
def metricteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Cups."""
return round(_volume * 0.021134, _round)
def metricteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011351, _round)
def metricteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Pints."""
return round(_volume * 0.0090808, _round)
def metricteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0045404, _round)
def metricteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16907, _round)
def metricteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Gills."""
return round(_volume * 0.042268, _round)
def metricteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013209, _round)
def metricteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010567, _round)
def metricteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052834, _round)
def metricteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Tablespoons."""
return round(_volume * 0.33814, _round)
def metricteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Teaspoons."""
return round(_volume * 1.0144, _round)
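# Illustrative metric relationships (derived from the factors above):
#   metricteaspoon2metrictablespoon(3)   -> 1.0   (three teaspoons per tablespoon)
#   metricteaspoon2liter(200)            -> 1.0   (200 * 0.005)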
# UK FLUID OUNCES #
def ukfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.12500, _round)
def ukfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.7339, _round)
def ukfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002841312686461145, _round)
def ukfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Liters."""
return round(_volume * 0.02841312686461145049, _round)
def ukfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11365, _round)
def ukfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.8942, _round)
def ukfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.6826, _round)
def ukfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0062500, _round)
def ukfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gills."""
return round(_volume * 0.20000, _round)
def ukfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Pints."""
return round(_volume * 0.050000, _round)
def ukfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Quarts."""
return round(_volume * 0.025000, _round)
def ukfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0000, _round)
def ukfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.0000, _round)
def ukfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Cups."""
return round(_volume * 0.12009, _round)
def ukfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0064504, _round)
def ukfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.051603, _round)
def ukfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.025801, _round)
def ukfluidounce2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Fluid Ounces."""
return round(_volume * 0.96076, _round)
def ukfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Gills."""
return round(_volume * 0.24019, _round)
def ukfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0075059, _round)
def ukfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.060047, _round)
def ukfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.030024, _round)
def ukfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Tablespoons."""
return round(_volume * 1.9215, _round)
def ukfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Teaspoons."""
return round(_volume * 5.7646, _round)
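# Illustrative checks within the imperial family (from the factors above):
#   ukfluidounce2ukgill(5)    -> 1.0   (5 fluid ounces per UK gill)
#   ukfluidounce2ukpint(20)   -> 1.0   (20 fluid ounces per UK pint)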
# UK GALLONS #
def ukgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Canadian Cups."""
return round(_volume * 20.000, _round)
def ukgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Inches."""
return round(_volume * 277.42, _round)
def ukgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Meters."""
return round(_volume * 0.00454607446469973178, _round)
def ukgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Liters."""
return round(_volume * 4.54607446469973178161, _round)
def ukgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Cups."""
return round(_volume * 18.184, _round)
def ukgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Tablespoons."""
return round(_volume * 303.07, _round)
def ukgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Teaspoons."""
return round(_volume * 909.22, _round)
def ukgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Fluid Ounces."""
return round(_volume * 160.00, _round)
def ukgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Gills."""
return round(_volume * 32.000, _round)
def ukgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Pints."""
return round(_volume * 8.0000, _round)
def ukgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Quarts."""
return round(_volume * 4.0000, _round)
def ukgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Tablespoons."""
return round(_volume * 320.00, _round)
def ukgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Teaspoons."""
return round(_volume * 1280.0, _round)
def ukgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Cups."""
return round(_volume * 19.215, _round)
def ukgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Gallons."""
return round(_volume * 1.0321, _round)
def ukgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Pints."""
return round(_volume * 8.2565, _round)
def ukgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Quarts."""
return round(_volume * 4.1282, _round)
def ukgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Fluid Ounces."""
return round(_volume * 153.72, _round)
def ukgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Gills."""
return round(_volume * 38.430, _round)
def ukgallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Gallons."""
return round(_volume * 1.2009, _round)
def ukgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Pints."""
return round(_volume * 9.6076, _round)
def ukgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Quarts."""
return round(_volume * 4.8038, _round)
def ukgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Tablespoons."""
return round(_volume * 307.44, _round)
def ukgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Teaspoons."""
return round(_volume * 922.33, _round)
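# Illustrative usage of the UK-gallon helpers above (informal sketch):
#   ukgallon2ukpint(1)   -> 8.0
#   ukgallon2liter(2)    -> 9.092   (2 * 4.546074..., rounded to 3 places)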
# UK GILLS #
def ukgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Canadian Cups."""
return round(_volume * 0.62500, _round)
def ukgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Inches."""
return round(_volume * 8.6694, _round)
def ukgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Meters."""
return round(_volume * 0.00014206563432305725, _round)
def ukgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Liters."""
return round(_volume * 0.14206563432305725245, _round)
def ukgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Cups."""
return round(_volume * 0.56826, _round)
def ukgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Tablespoons."""
return round(_volume * 9.4710, _round)
def ukgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Teaspoons."""
return round(_volume * 28.413, _round)
def ukgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Fluid Ounces."""
return round(_volume * 5.0000, _round)
def ukgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Gallons."""
return round(_volume * 0.031250, _round)
def ukgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Pints."""
return round(_volume * 0.25000, _round)
def ukgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Quarts."""
return round(_volume * 0.12500, _round)
def ukgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Tablespoons."""
return round(_volume * 10.000, _round)
def ukgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Teaspoons."""
return round(_volume * 40.000, _round)
def ukgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Cups."""
return round(_volume * 0.60047, _round)
def ukgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Gallons."""
return round(_volume * 0.032252, _round)
def ukgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Pints."""
return round(_volume * 0.25801, _round)
def ukgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Quarts."""
return round(_volume * 0.12901, _round)
def ukgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Fluid Ounces."""
return round(_volume * 4.8038, _round)
def ukgill2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Gills."""
return round(_volume * 1.2009, _round)
def ukgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Gallons."""
return round(_volume * 0.037530, _round)
def ukgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Pints."""
return round(_volume * 0.30024, _round)
def ukgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Quarts."""
return round(_volume * 0.15012, _round)
def ukgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Tablespoons."""
return round(_volume * 9.6076, _round)
def ukgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Teaspoons."""
return round(_volume * 28.823, _round)
# UK PINTS #
def ukpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Canadian Cups."""
return round(_volume * 2.5000, _round)
def ukpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Inches."""
return round(_volume * 34.677, _round)
def ukpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Meters."""
return round(_volume * 0.00056824639163541312, _round)
def ukpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Liters."""
return round(_volume * 0.56824639163541311513, _round)
def ukpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Cups."""
return round(_volume * 2.2730, _round)
def ukpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Tablespoons."""
return round(_volume * 37.884, _round)
def ukpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Teaspoons."""
return round(_volume * 113.65, _round)
def ukpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Fluid Ounces."""
return round(_volume * 20.000, _round)
def ukpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gallons."""
return round(_volume * 0.12500, _round)
def ukpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gills."""
return round(_volume * 4.0000, _round)
def ukpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Quarts."""
return round(_volume * 0.50000, _round)
def ukpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Tablespoons."""
return round(_volume * 40.000, _round)
def ukpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Teaspoons."""
return round(_volume * 160.00, _round)
def ukpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Cups."""
return round(_volume * 2.4019, _round)
def ukpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
    """UK Pints -> US Dry Gallons."""
    return round(_volume * 0.12901, _round)
def ukpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Pints."""
return round(_volume * 1.0321, _round)
def ukpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Quarts."""
return round(_volume * 0.51603, _round)
def ukpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Fluid Ounces."""
return round(_volume * 19.215, _round)
def ukpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Gills."""
return round(_volume * 4.8038, _round)
def ukpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Gallons."""
return round(_volume * 0.15012, _round)
def ukpint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Pints."""
return round(_volume * 1.2009, _round)
def ukpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Quarts."""
return round(_volume * 0.60047, _round)
def ukpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Tablespoons."""
return round(_volume * 38.430, _round)
def ukpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Teaspoons."""
return round(_volume * 115.29, _round)
# UK QUARTS #
def ukquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Canadian Cups."""
return round(_volume * 5.0000, _round)
def ukquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Inches."""
return round(_volume * 69.355, _round)
def ukquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Meters."""
return round(_volume * 0.00113651861617493295, _round)
def ukquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Liters."""
return round(_volume * 1.1365186161749329454, _round)
def ukquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Cups."""
return round(_volume * 4.5461, _round)
def ukquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Tablespoons."""
return round(_volume * 75.768, _round)
def ukquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Teaspoons."""
return round(_volume * 227.30, _round)
def ukquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Fluid Ounces."""
return round(_volume * 40.000, _round)
def ukquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gallons."""
return round(_volume * 0.25000, _round)
def ukquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gills."""
return round(_volume * 8.0000, _round)
def ukquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Pints."""
return round(_volume * 2.0000, _round)
def ukquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Tablespoons."""
return round(_volume * 80.000, _round)
def ukquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Teaspoons."""
return round(_volume * 320.00, _round)
def ukquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Cups."""
return round(_volume * 4.8038, _round)
def ukquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Gallons."""
return round(_volume * 0.25801, _round)
def ukquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Pints."""
return round(_volume * 2.0641, _round)
def ukquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Quarts."""
return round(_volume * 1.0321, _round)
def ukquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Fluid Ounces."""
return round(_volume * 38.430, _round)
def ukquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Gills."""
return round(_volume * 9.6076, _round)
def ukquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Gallons."""
return round(_volume * 0.30024, _round)
def ukquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Pints."""
return round(_volume * 2.4019, _round)
def ukquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Quarts."""
return round(_volume * 1.2009, _round)
def ukquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Tablespoons."""
return round(_volume * 76.861, _round)
def ukquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Teaspoons."""
return round(_volume * 230.58, _round)
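# Illustrative usage of the UK-quart helpers above (values follow from the
# constants in this block):
#   ukquart2ukpint(3)   -> 6.0
#   ukquart2liter(1)    -> 1.137   (1.13651..., rounded to 3 places)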
# UK TABLESPOONS #
def uktablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Canadian Cups."""
return round(_volume * 0.062500, _round)
def uktablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Inches."""
return round(_volume * 0.86694, _round)
def uktablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001420656343230573, _round)
def uktablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Liters."""
return round(_volume * 0.01420656343230572525, _round)
def uktablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Cups."""
return round(_volume * 0.056826, _round)
def uktablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.94710, _round)
def uktablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.8413, _round)
def uktablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.50000, _round)
def uktablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gallons."""
return round(_volume * 0.0031250, _round)
def uktablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gills."""
return round(_volume * 0.10000, _round)
def uktablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Pints."""
return round(_volume * 0.025000, _round)
def uktablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Quarts."""
return round(_volume * 0.012500, _round)
def uktablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Teaspoons."""
return round(_volume * 4.0000, _round)
def uktablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Cups."""
return round(_volume * 0.060048, _round)
def uktablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0032252, _round)
def uktablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Pints."""
return round(_volume * 0.025801, _round)
def uktablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Quarts."""
return round(_volume * 0.012901, _round)
def uktablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.48038, _round)
def uktablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Gills."""
return round(_volume * 0.12010, _round)
def uktablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0037530, _round)
def uktablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Pints."""
return round(_volume * 0.030024, _round)
def uktablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015012, _round)
def uktablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Tablespoons."""
return round(_volume * 0.96076, _round)
def uktablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Teaspoons."""
return round(_volume * 2.8823, _round)
# UK TEASPOONS #
def ukteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Canadian Cups."""
return round(_volume * 0.015625, _round)
def ukteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Inches."""
return round(_volume * 0.21673, _round)
def ukteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000355164085807643, _round)
def ukteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Liters."""
return round(_volume * 0.00355164085807643131, _round)
def ukteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Cups."""
return round(_volume * 0.014207, _round)
def ukteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.23678, _round)
def ukteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.71033, _round)
def ukteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.12500, _round)
def ukteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gallons."""
return round(_volume * 0.00078125, _round)
def ukteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gills."""
return round(_volume * 0.025000, _round)
def ukteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Pints."""
return round(_volume * 0.0062500, _round)
def ukteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Quarts."""
return round(_volume * 0.0031250, _round)
def ukteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Tablespoons."""
return round(_volume * 0.25000, _round)
def ukteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Cups."""
return round(_volume * 0.015012, _round)
def ukteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Gallons."""
return round(_volume * 0.00080630, _round)
def ukteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Pints."""
return round(_volume * 0.0064504, _round)
def ukteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0032252, _round)
def ukteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.12010, _round)
def ukteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Gills."""
return round(_volume * 0.030024, _round)
def ukteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.00093824, _round)
def ukteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Pints."""
return round(_volume * 0.0075060, _round)
def ukteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0037530, _round)
def ukteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Tablespoons."""
return round(_volume * 0.24019, _round)
def ukteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Teaspoons."""
return round(_volume * 0.72057, _round)
# US CUPS #
def uscup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Canadian Cups."""
return round(_volume * 1.0408, _round)
def uscup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Inches."""
return round(_volume * 14.438, _round)
def uscup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Meters."""
return round(_volume * 0.0002365855966688748, _round)
def uscup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Liters."""
return round(_volume * 0.2365855966688747989, _round)
def uscup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Cups."""
return round(_volume * 0.94635, _round)
def uscup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Tablespoons."""
return round(_volume * 15.773, _round)
def uscup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Teaspoons."""
return round(_volume * 47.318, _round)
def uscup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Fluid Ounces."""
return round(_volume * 8.3267, _round)
def uscup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gallons."""
return round(_volume * 0.052042, _round)
def uscup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gills."""
return round(_volume * 1.6653, _round)
def uscup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Pints."""
return round(_volume * 0.41634, _round)
def uscup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Quarts."""
return round(_volume * 0.20817, _round)
def uscup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Tablespoons."""
return round(_volume * 16.653, _round)
def uscup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Teaspoons."""
return round(_volume * 66.614, _round)
def uscup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Gallons."""
return round(_volume * 0.053710, _round)
def uscup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Pints."""
return round(_volume * 0.42968, _round)
def uscup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Quarts."""
return round(_volume * 0.21484, _round)
def uscup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Fluid Ounces."""
return round(_volume * 8.0000, _round)
def uscup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Gills."""
return round(_volume * 2.0000, _round)
def uscup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Gallons."""
return round(_volume * 0.062500, _round)
def uscup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Pints."""
return round(_volume * 0.50000, _round)
def uscup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Quarts."""
return round(_volume * 0.25000, _round)
def uscup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Tablespoons."""
return round(_volume * 16.000, _round)
def uscup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Teaspoons."""
return round(_volume * 48.000, _round)
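# Illustrative usage of the US-cup helpers above (informal sketch):
#   uscup2usfluidounce(2)     -> 16.0
#   uscup2ustablespoon(0.5)   -> 8.0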
# US DRY GALLONS #
def usdrygallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Canadian Cups."""
return round(_volume * 19.379, _round)
def usdrygallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Inches."""
return round(_volume * 268.80, _round)
def usdrygallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Meters."""
return round(_volume * 0.00440489824685049775, _round)
def usdrygallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Liters."""
return round(_volume * 4.4048982468504977535, _round)
def usdrygallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Cups."""
return round(_volume * 17.620, _round)
def usdrygallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Tablespoons."""
return round(_volume * 293.66, _round)
def usdrygallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Teaspoons."""
return round(_volume * 880.98, _round)
def usdrygallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Fluid Ounces."""
return round(_volume * 155.03, _round)
def usdrygallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gallons."""
return round(_volume * 0.96894, _round)
def usdrygallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gills."""
return round(_volume * 31.006, _round)
def usdrygallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Pints."""
return round(_volume * 7.7515, _round)
def usdrygallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Quarts."""
return round(_volume * 3.8758, _round)
def usdrygallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Tablespoons."""
return round(_volume * 310.06, _round)
def usdrygallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Teaspoons."""
return round(_volume * 1240.2, _round)
def usdrygallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Cups."""
return round(_volume * 18.618, _round)
def usdrygallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Pints."""
return round(_volume * 8.0000, _round)
def usdrygallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Quarts."""
return round(_volume * 4.0000, _round)
def usdrygallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Fluid Ounces."""
return round(_volume * 148.95, _round)
def usdrygallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Gills."""
return round(_volume * 37.237, _round)
def usdrygallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Gallons."""
return round(_volume * 1.1636, _round)
def usdrygallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Pints."""
return round(_volume * 9.3092, _round)
def usdrygallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Quarts."""
return round(_volume * 4.6546, _round)
def usdrygallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Tablespoons."""
return round(_volume * 297.89, _round)
def usdrygallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Teaspoons."""
return round(_volume * 893.68, _round)
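# Illustrative usage of the US-dry-gallon helpers above (informal sketch):
#   usdrygallon2usdryquart(2)   -> 8.0
#   usdrygallon2liter(1)        -> 4.405   (4.40489..., rounded to 3 places)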
# US DRY PINTS #
def usdrypint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Canadian Cups."""
return round(_volume * 2.4223, _round)
def usdrypint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Inches."""
return round(_volume * 33.600, _round)
def usdrypint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Meters."""
return round(_volume * 0.00055060015416804317, _round)
def usdrypint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Liters."""
return round(_volume * 0.55060015416804316705, _round)
def usdrypint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Cups."""
return round(_volume * 2.2024, _round)
def usdrypint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Tablespoons."""
return round(_volume * 36.707, _round)
def usdrypint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Teaspoons."""
return round(_volume * 110.12, _round)
def usdrypint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Fluid Ounces."""
return round(_volume * 19.379, _round)
def usdrypint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gallons."""
return round(_volume * 0.12112, _round)
def usdrypint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gills."""
return round(_volume * 3.8758, _round)
def usdrypint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Pints."""
return round(_volume * 0.96894, _round)
def usdrypint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Quarts."""
return round(_volume * 0.48447, _round)
def usdrypint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Tablespoons."""
return round(_volume * 38.758, _round)
def usdrypint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Teaspoons."""
return round(_volume * 155.03, _round)
def usdrypint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Cups."""
return round(_volume * 2.3273, _round)
def usdrypint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Gallons."""
return round(_volume * 0.12500, _round)
def usdrypint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Quarts."""
return round(_volume * 0.50000, _round)
def usdrypint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Fluid Ounces."""
return round(_volume * 18.618, _round)
def usdrypint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Gills."""
return round(_volume * 4.6546, _round)
def usdrypint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Gallons."""
return round(_volume * 0.14546, _round)
def usdrypint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Pints."""
return round(_volume * 1.1636, _round)
def usdrypint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Quarts."""
return round(_volume * 0.58182, _round)
def usdrypint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Tablespoons."""
return round(_volume * 37.237, _round)
def usdrypint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Teaspoons."""
return round(_volume * 111.71, _round)
# US DRY QUARTS #
def usdryquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Canadian Cups."""
return round(_volume * 4.8447, _round)
def usdryquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Inches."""
return round(_volume * 67.201, _round)
def usdryquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Meters."""
return round(_volume * 0.00110122456171262444, _round)
def usdryquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Liters."""
return round(_volume * 1.10122456171262443838, _round)
def usdryquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Cups."""
return round(_volume * 4.4049, _round)
def usdryquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Tablespoons."""
return round(_volume * 73.415, _round)
def usdryquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Teaspoons."""
return round(_volume * 220.24, _round)
def usdryquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Fluid Ounces."""
return round(_volume * 38.758, _round)
def usdryquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gallons."""
return round(_volume * 0.24223, _round)
def usdryquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gills."""
return round(_volume * 7.7515, _round)
def usdryquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Pints."""
return round(_volume * 1.9379, _round)
def usdryquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Quarts."""
return round(_volume * 0.96894, _round)
def usdryquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Tablespoons."""
return round(_volume * 77.515, _round)
def usdryquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Teaspoons."""
return round(_volume * 310.06, _round)
def usdryquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Cups."""
return round(_volume * 4.6546, _round)
def usdryquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Gallons."""
return round(_volume * 0.25000, _round)
def usdryquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Pints."""
return round(_volume * 2.0000, _round)
def usdryquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Fluid Ounces."""
return round(_volume * 37.237, _round)
def usdryquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Gills."""
return round(_volume * 9.3092, _round)
def usdryquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Gallons."""
return round(_volume * 0.29091, _round)
def usdryquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Pints."""
return round(_volume * 2.3273, _round)
def usdryquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Quarts."""
return round(_volume * 1.1636, _round)
def usdryquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Tablespoons."""
return round(_volume * 74.473, _round)
def usdryquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Teaspoons."""
return round(_volume * 223.42, _round)
# US FLUID OUNCES #
def usfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.13011, _round)
def usfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.8047, _round)
def usfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002957354941740108, _round)
def usfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Liters."""
return round(_volume * 0.02957354941740107648, _round)
def usfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11829, _round)
def usfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.9716, _round)
def usfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.9147, _round)
def usfluidounce2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Fluid Ounces."""
return round(_volume * 1.0408, _round)
def usfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0065053, _round)
def usfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gills."""
return round(_volume * 0.20817, _round)
def usfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Pints."""
return round(_volume * 0.052042, _round)
def usfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Quarts."""
return round(_volume * 0.026021, _round)
def usfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0817, _round)
def usfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.3267, _round)
def usfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Cups."""
return round(_volume * 0.12500, _round)
def usfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0067138, _round)
def usfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.053710, _round)
def usfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.026855, _round)
def usfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Gills."""
return round(_volume * 0.25000, _round)
def usfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0078125, _round)
def usfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.062500, _round)
def usfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.031250, _round)
def usfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Tablespoons."""
return round(_volume * 2.0000, _round)
def usfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Teaspoons."""
return round(_volume * 6.0000, _round)
# US GILLS #
def usgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Canadian Cups."""
return round(_volume * 0.52042, _round)
def usgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Inches."""
return round(_volume * 7.2187, _round)
def usgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Meters."""
return round(_volume * 0.00011829419766960431, _round)
def usgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Liters."""
return round(_volume * 0.11829419766960430591, _round)
def usgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Cups."""
return round(_volume * 0.47318, _round)
def usgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Tablespoons."""
return round(_volume * 7.8863, _round)
def usgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Teaspoons."""
return round(_volume * 23.659, _round)
def usgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Fluid Ounces."""
return round(_volume * 4.1634, _round)
def usgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gallons."""
return round(_volume * 0.026021, _round)
def usgill2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gills."""
return round(_volume * 0.83267, _round)
def usgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Pints."""
return round(_volume * 0.20817, _round)
def usgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Quarts."""
return round(_volume * 0.10408, _round)
def usgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Tablespoons."""
return round(_volume * 8.3267, _round)
def usgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Teaspoons."""
return round(_volume * 33.307, _round)
def usgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Cups."""
return round(_volume * 0.50000, _round)
def usgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Gallons."""
return round(_volume * 0.026855, _round)
def usgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Pints."""
return round(_volume * 0.21484, _round)
def usgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Quarts."""
return round(_volume * 0.10742, _round)
def usgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Fluid Ounces."""
return round(_volume * 4.0000, _round)
def usgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Gallons."""
return round(_volume * 0.031250, _round)
def usgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Pints."""
return round(_volume * 0.25000, _round)
def usgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Quarts."""
return round(_volume * 0.12500, _round)
def usgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Tablespoons."""
return round(_volume * 8.0000, _round)
def usgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Teaspoons."""
return round(_volume * 24.000, _round)
# US LIQUID GALLONS #
def usliquidgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Canadian Cups."""
return round(_volume * 16.653, _round)
def usliquidgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Inches."""
return round(_volume * 231.00, _round)
def usliquidgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Meters."""
return round(_volume * 0.00378544119317106409, _round)
def usliquidgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Liters."""
return round(_volume * 3.78544119317106408752, _round)
def usliquidgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Cups."""
return round(_volume * 15.142, _round)
def usliquidgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Tablespoons."""
return round(_volume * 252.36, _round)
def usliquidgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Teaspoons."""
return round(_volume * 757.08, _round)
def usliquidgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Fluid Ounces."""
return round(_volume * 133.23, _round)
def usliquidgallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gallons."""
return round(_volume * 0.83267, _round)
def usliquidgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gills."""
return round(_volume * 26.646, _round)
def usliquidgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Pints."""
return round(_volume * 6.6614, _round)
def usliquidgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Quarts."""
return round(_volume * 3.3307, _round)
def usliquidgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Tablespoons."""
return round(_volume * 266.46, _round)
def usliquidgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Teaspoons."""
return round(_volume * 1065.8, _round)
def usliquidgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Cups."""
return round(_volume * 16.000, _round)
def usliquidgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Gallons."""
return round(_volume * 0.85937, _round)
def usliquidgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Pints."""
return round(_volume * 6.8749, _round)
def usliquidgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Quarts."""
return round(_volume * 3.4375, _round)
def usliquidgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Fluid Ounces."""
return round(_volume * 128.00, _round)
def usliquidgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Gills."""
return round(_volume * 32.000, _round)
def usliquidgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Pints."""
return round(_volume * 8.0000, _round)
def usliquidgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Quarts."""
return round(_volume * 4.0000, _round)
def usliquidgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Tablespoons."""
return round(_volume * 256.00, _round)
def usliquidgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Teaspoons."""
return round(_volume * 768.00, _round)
# US LIQUID PINTS #
def usliquidpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Canadian Cups."""
return round(_volume * 2.0817, _round)
def usliquidpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Inches."""
return round(_volume * 28.875, _round)
def usliquidpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Meters."""
return round(_volume * 0.0004731711933377496, _round)
def usliquidpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Liters."""
return round(_volume * 0.4731711933377495978, _round)
def usliquidpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Cups."""
return round(_volume * 1.8927, _round)
def usliquidpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Tablespoons."""
return round(_volume * 31.545, _round)
def usliquidpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Teaspoons."""
return round(_volume * 94.635, _round)
def usliquidpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Fluid Ounces."""
return round(_volume * 16.653, _round)
def usliquidpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gallons."""
return round(_volume * 0.10408, _round)
def usliquidpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gills."""
return round(_volume * 3.3307, _round)
def usliquidpint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Pints."""
return round(_volume * 0.83267, _round)
def usliquidpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Quarts."""
return round(_volume * 0.41634, _round)
def usliquidpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Tablespoons."""
return round(_volume * 33.307, _round)
def usliquidpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Teaspoons."""
return round(_volume * 133.23, _round)
def usliquidpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Cups."""
return round(_volume * 2.0000, _round)
def usliquidpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Gallons."""
return round(_volume * 0.10742, _round)
def usliquidpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Pints."""
return round(_volume * 0.85937, _round)
def usliquidpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Quarts."""
return round(_volume * 0.42968, _round)
def usliquidpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Fluid Ounces."""
return round(_volume * 16.000, _round)
def usliquidpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Gills."""
return round(_volume * 4.0000, _round)
def usliquidpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Gallons."""
return round(_volume * 0.12500, _round)
def usliquidpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Quarts."""
return round(_volume * 0.50000, _round)
def usliquidpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Tablespoons."""
return round(_volume * 32.000, _round)
def usliquidpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Teaspoons."""
return round(_volume * 96.000, _round)
# US LIQUID QUARTS #
def usliquidquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Canadian Cups."""
return round(_volume * 4.1634, _round)
def usliquidquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Inches."""
return round(_volume * 57.750, _round)
def usliquidquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Meters."""
return round(_volume * 0.0009463423866754992, _round)
def usliquidquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Liters."""
return round(_volume * 0.94634238667549919561, _round)
def usliquidquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Cups."""
return round(_volume * 3.7854, _round)
def usliquidquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Tablespoons."""
return round(_volume * 63.090, _round)
def usliquidquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Teaspoons."""
return round(_volume * 189.27, _round)
def usliquidquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Fluid Ounces."""
return round(_volume * 33.307, _round)
def usliquidquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gallons."""
return round(_volume * 0.20817, _round)
def usliquidquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gills."""
return round(_volume * 6.6614, _round)
def usliquidquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Pints."""
return round(_volume * 1.6653, _round)
def usliquidquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Quarts."""
return round(_volume * 0.83267, _round)
def usliquidquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Tablespoons."""
return round(_volume * 66.614, _round)
def usliquidquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Teaspoons."""
return round(_volume * 266.46, _round)
def usliquidquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Cups."""
return round(_volume * 4.0000, _round)
def usliquidquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Gallons."""
return round(_volume * 0.21484, _round)
def usliquidquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Pints."""
return round(_volume * 1.7187, _round)
def usliquidquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Quarts."""
return round(_volume * 0.85937, _round)
def usliquidquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Fluid Ounces."""
return round(_volume * 32.000, _round)
def usliquidquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Gills."""
return round(_volume * 8.0000, _round)
def usliquidquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Gallons."""
return round(_volume * 0.25000, _round)
def usliquidquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Pints."""
return round(_volume * 2.0000, _round)
def usliquidquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Tablespoons."""
return round(_volume * 64.000, _round)
def usliquidquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Teaspoons."""
return round(_volume * 192.00, _round)
# US TABLESPOONS #
def ustablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Canadian Cups."""
return round(_volume * 0.065053, _round)
def ustablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Inches."""
return round(_volume * 0.90234, _round)
def ustablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001478677470870054, _round)
def ustablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Liters."""
return round(_volume * 0.01478677470870053824, _round)
def ustablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Cups."""
return round(_volume * 0.059147, _round)
def ustablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.98578, _round)
def ustablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.9574, _round)
def ustablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52042, _round)
def ustablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gallons."""
return round(_volume * 0.0032526, _round)
def ustablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gills."""
return round(_volume * 0.10408, _round)
def ustablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Pints."""
return round(_volume * 0.026021, _round)
def ustablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Quarts."""
return round(_volume * 0.013011, _round)
def ustablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0408, _round)
def ustablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Teaspoons."""
return round(_volume * 4.1634, _round)
def ustablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Cups."""
return round(_volume * 0.062500, _round)
def ustablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0033569, _round)
def ustablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Pints."""
return round(_volume * 0.026855, _round)
def ustablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013428, _round)
def ustablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50000, _round)
def ustablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Gills."""
return round(_volume * 0.12500, _round)
def ustablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039062, _round)
def ustablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031250, _round)
def ustablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015625, _round)
def ustablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Teaspoons."""
return round(_volume * 3.0000, _round)
# US TEASPOONS #
def usteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Canadian Cups."""
return round(_volume * 0.021684, _round)
def usteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Inches."""
return round(_volume * 0.30078, _round)
def usteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000492902208201893, _round)
def usteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Liters."""
return round(_volume * 0.00492902208201892744, _round)
def usteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Cups."""
return round(_volume * 0.019716, _round)
def usteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.32859, _round)
def usteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.98578, _round)
def usteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17347, _round)
def usteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gallons."""
return round(_volume * 0.0010842, _round)
def usteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gills."""
return round(_volume * 0.034695, _round)
def usteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Pints."""
return round(_volume * 0.0086737, _round)
def usteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Quarts."""
return round(_volume * 0.0043368, _round)
def usteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Tablespoons."""
return round(_volume * 0.34695, _round)
def usteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Teaspoons."""
return round(_volume * 1.3878, _round)
def usteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Cups."""
return round(_volume * 0.020833, _round)
def usteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011190, _round)
def usteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Pints."""
return round(_volume * 0.0089517, _round)
def usteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0044759, _round)
def usteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16667, _round)
def usteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Gills."""
return round(_volume * 0.041667, _round)
def usteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013021, _round)
def usteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010417, _round)
def usteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052083, _round)
def usteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Tablespoons."""
return round(_volume * 0.33333, _round)
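# Added illustrative sketch (not part of the original module): every converter
# above is a single rounded multiplication, so chaining a conversion with its
# inverse should land back near the starting value; the tiny drift comes from
# the rounded factors.
if __name__ == '__main__':
    _gal = 2.5
    _tsp = usliquidgallon2usteaspoon(_gal)    # 2.5 US gal -> 1920.0 US tsp
    _back = usteaspoon2usliquidgallon(_tsp)   # 1920.0 US tsp -> ~2.5 US gal
    assert abs(_back - _gal) < 0.01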
|
lgpl-3.0
|
fingeronthebutton/RIDE
|
src/robotide/lib/robot/utils/ordereddict.py
|
1047
|
4094
|
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
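# Added illustrative sketch (not part of the original backport): demonstrates the
# insertion-order behaviour this DictMixin-based OrderedDict provides on Python 2,
# where it was used before collections.OrderedDict existed.
if __name__ == '__main__':
    d = OrderedDict([('b', 2), ('a', 1)])
    d['c'] = 3
    assert d.keys() == ['b', 'a', 'c']   # keys come back in insertion order
    assert d.popitem() == ('c', 3)       # popitem() removes the most recently added item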
|
apache-2.0
|
maleficarium/youtube-dl
|
youtube_dl/extractor/ellentv.py
|
11
|
2708
|
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class EllenTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/videos/0-ipq1gsai/',
'md5': '4294cf98bc165f218aaa0b89e0fd8042',
'info_dict': {
'id': '0_ipq1gsai',
'ext': 'mov',
'title': 'Fast Fingers of Fate',
'description': 'md5:3539013ddcbfa64b2a6d1b38d910868a',
'timestamp': 1428035648,
'upload_date': '20150403',
'uploader_id': 'batchUser',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://widgets.ellentube.com/videos/%s' % video_id,
video_id)
partner_id = self._search_regex(
r"var\s+partnerId\s*=\s*'([^']+)", webpage, 'partner id')
kaltura_id = self._search_regex(
[r'id="kaltura_player_([^"]+)"',
r"_wb_entry_id\s*:\s*'([^']+)",
r'data-kaltura-entry-id="([^"]+)'],
webpage, 'kaltura id')
return self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura')
class EllenTVClipsIE(InfoExtractor):
IE_NAME = 'EllenTV:clips'
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
'info_dict': {
'id': 'meryl-streep-vanessa-hudgens',
'title': 'Meryl Streep, Vanessa Hudgens',
},
'playlist_mincount': 7,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._extract_playlist(webpage)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist)
}
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
try:
return json.loads('[{' + json_string + '}]')
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
def _extract_entries(self, playlist):
return [
self.url_result(
'kaltura:%s:%s' % (item['kaltura_partner_id'], item['kaltura_entry_id']),
'Kaltura')
for item in playlist]
|
unlicense
|
lisael/pg-django
|
django/contrib/auth/middleware.py
|
101
|
3501
|
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
def get_user(request):
if not hasattr(request, '_cached_user'):
request._cached_user = auth.get_user(request)
return request._cached_user
class AuthenticationMiddleware(object):
def process_request(self, request):
assert hasattr(request, 'session'), "The Django authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
request.user = SimpleLazyObject(lambda: get_user(request))
class RemoteUserMiddleware(object):
"""
Middleware for utilizing Web-server-provided authentication.
If request.user is not authenticated, then this middleware attempts to
authenticate the username passed in the ``REMOTE_USER`` request header.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The header used is configurable and defaults to ``REMOTE_USER``. Subclass
this class and change the ``header`` attribute if you need to use a
different header.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
header = "REMOTE_USER"
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then return (leaving
# request.user set to AnonymousUser by the
# AuthenticationMiddleware).
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(remote_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError: # Backend has no clean_username method.
pass
return username
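# Added illustrative sketch (not part of the original module): a subclass that
# reads a different, hypothetical header. Because request.META normalizes header
# names to upper case and prefixes them with "HTTP_", a proxy header named
# "X-Proxy-User" is looked up as "HTTP_X_PROXY_USER".
class ProxyUserMiddleware(RemoteUserMiddleware):
    header = "HTTP_X_PROXY_USER"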
|
bsd-3-clause
|
bavardage/statsmodels
|
statsmodels/stats/tests/test_groups_sw.py
|
34
|
2750
|
# -*- coding: utf-8 -*-
"""Test for a helper function for PanelHAC robust covariance
The functions should be rewritten to make them more efficient.
Created on Thu May 17 21:09:41 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_equal, assert_raises
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.grouputils import Group, GroupSorted
class CheckPanelLagMixin(object):
def calculate(self):
self.g = g = GroupSorted(self.gind) # pylint: disable-msg=W0201
self.alla = [(lag, sw.lagged_groups(self.x, lag, g.groupidx)) # pylint: disable-msg=W0201
for lag in range(5)]
def test_values(self):
for lag, (y0, ylag) in self.alla:
assert_equal(y0, self.alle[lag].T)
assert_equal(y0, ylag + lag)
def test_raises(self):
mlag = self.mlag
assert_raises(ValueError, sw.lagged_groups, self.x, mlag,
self.g.groupidx)
class TestBalanced(CheckPanelLagMixin):
def __init__(self):
self.gind = np.repeat([0,1,2], 5)
self.mlag = 5
x = np.arange(15)
x += 10**self.gind
self.x = x[:,None]
#expected result
self.alle = {
0 : np.array([[ 1, 2, 3, 4, 5, 15, 16, 17, 18, 19,
110, 111, 112, 113, 114]]),
1 : np.array([[ 2, 3, 4, 5, 16, 17, 18, 19, 111, 112,
113, 114]]),
2 : np.array([[ 3, 4, 5, 17, 18, 19, 112, 113, 114]]),
3 : np.array([[ 4, 5, 18, 19, 113, 114]]),
4 : np.array([[ 5, 19, 114]])
}
self.calculate()
class TestUnBalanced(CheckPanelLagMixin):
def __init__(self):
self.gind = gind = np.repeat([0,1,2], [3, 5, 10])
self.mlag = 10 #maxlag
x = np.arange(18)
x += 10**gind
self.x = x[:,None]
#expected result
self.alle = {
0 : np.array([[ 1, 2, 3, 13, 14, 15, 16, 17, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117]]),
1 : np.array([[ 2, 3, 14, 15, 16, 17, 109, 110, 111, 112,
113, 114, 115, 116, 117]]),
2 : np.array([[ 3, 15, 16, 17, 110, 111, 112, 113, 114, 115,
116, 117]]),
3 : np.array([[ 16, 17, 111, 112, 113, 114, 115, 116, 117]]),
4 : np.array([[ 17, 112, 113, 114, 115, 116, 117]]),
5 : np.array([[113, 114, 115, 116, 117]]),
}
self.calculate()
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb-failures'], exit=False)
|
bsd-3-clause
|
keithroe/vtkoptix
|
Imaging/Core/Testing/Python/TestAllFlips.py
|
20
|
1214
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkImageReader()
reader.GetExecutive().SetReleaseDataFlag(0,0)
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
imageFloat = vtk.vtkImageCast()
imageFloat.SetInputConnection(reader.GetOutputPort())
imageFloat.SetOutputScalarTypeToFloat()
flipX = vtk.vtkImageFlip()
flipX.SetInputConnection(imageFloat.GetOutputPort())
flipX.SetFilteredAxis(0)
flipY = vtk.vtkImageFlip()
flipY.SetInputConnection(imageFloat.GetOutputPort())
flipY.SetFilteredAxis(1)
flipY.FlipAboutOriginOn()
imageAppend = vtk.vtkImageAppend()
imageAppend.AddInputConnection(imageFloat.GetOutputPort())
imageAppend.AddInputConnection(flipX.GetOutputPort())
imageAppend.AddInputConnection(flipY.GetOutputPort())
imageAppend.SetAppendAxis(0)
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(imageAppend.GetOutputPort())
viewer.SetZSlice(22)
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
#make interface
viewer.Render()
# --- end of script --
|
bsd-3-clause
|
googleapis/python-pubsublite
|
tests/unit/pubsublite/cloudpubsub/internal/single_partition_subscriber_test.py
|
1
|
10169
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import Callable
from asynctest.mock import MagicMock, call
import pytest
from google.api_core.exceptions import FailedPrecondition
from google.cloud.pubsub_v1.subscriber.message import Message
from google.pubsub_v1 import PubsubMessage
from google.cloud.pubsublite.types import FlowControlSettings
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker import AckSetTracker
from google.cloud.pubsublite.cloudpubsub.internal.single_partition_subscriber import (
SinglePartitionSingleSubscriber,
)
from google.cloud.pubsublite.cloudpubsub.message_transformer import MessageTransformer
from google.cloud.pubsublite.cloudpubsub.nack_handler import NackHandler
from google.cloud.pubsublite.cloudpubsub.internal.single_subscriber import (
AsyncSingleSubscriber,
)
from google.cloud.pubsublite.internal.wire.subscriber import Subscriber
from google.cloud.pubsublite.internal.wire.subscriber_reset_handler import (
SubscriberResetHandler,
)
from google.cloud.pubsublite.testing.test_utils import make_queue_waiter
from google.cloud.pubsublite_v1 import Cursor, FlowControlRequest, SequencedMessage
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
def mock_async_context_manager(cm):
cm.__aenter__.return_value = cm
return cm
@pytest.fixture()
def underlying():
return mock_async_context_manager(MagicMock(spec=Subscriber))
@pytest.fixture()
def flow_control_settings():
return FlowControlSettings(1000, 1000)
@pytest.fixture()
def initial_flow_request(flow_control_settings):
return FlowControlRequest(
allowed_messages=flow_control_settings.messages_outstanding,
allowed_bytes=flow_control_settings.bytes_outstanding,
)
@pytest.fixture()
def ack_set_tracker():
return mock_async_context_manager(MagicMock(spec=AckSetTracker))
@pytest.fixture()
def nack_handler():
return MagicMock(spec=NackHandler)
@pytest.fixture()
def transformer():
result = MagicMock(spec=MessageTransformer)
result.transform.side_effect = lambda source: PubsubMessage(
message_id=str(source.cursor.offset)
)
return result
@pytest.fixture()
def subscriber(
underlying, flow_control_settings, ack_set_tracker, nack_handler, transformer
):
def subscriber_factory(reset_handler: SubscriberResetHandler):
return underlying
return SinglePartitionSingleSubscriber(
subscriber_factory,
flow_control_settings,
ack_set_tracker,
nack_handler,
transformer,
)
async def test_init(subscriber, underlying, ack_set_tracker, initial_flow_request):
async with subscriber:
underlying.__aenter__.assert_called_once()
ack_set_tracker.__aenter__.assert_called_once()
underlying.allow_flow.assert_called_once_with(initial_flow_request)
underlying.__aexit__.assert_called_once()
ack_set_tracker.__aexit__.assert_called_once()
async def test_failed_transform(subscriber, underlying, transformer):
async with subscriber:
transformer.transform.side_effect = FailedPrecondition("Bad message")
underlying.read.return_value = SequencedMessage()
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_ack(
subscriber: AsyncSingleSubscriber, underlying, transformer, ack_set_tracker
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message_1 = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
message_2 = SequencedMessage(cursor=Cursor(offset=2), size_bytes=10)
underlying.read.return_value = message_1
read_1: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
assert read_1.message_id == "1"
underlying.read.return_value = message_2
read_2: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1), call(2)])
assert read_2.message_id == "2"
read_2.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(2)])
read_1.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(2), call(1)])
async def test_track_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
async with subscriber:
ack_set_tracker.track.side_effect = FailedPrecondition("Bad track")
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
with pytest.raises(FailedPrecondition):
await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
async def test_ack_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
read.ack()
await ack_called_queue.get()
ack_set_tracker.ack.assert_has_calls([call(1)])
await ack_result_queue.put(FailedPrecondition("Bad ack"))
async def sleep_forever():
await asyncio.sleep(float("inf"))
underlying.read.side_effect = sleep_forever
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_nack_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
nack_handler,
):
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
nack_handler.on_nack.side_effect = FailedPrecondition("Bad nack")
read.nack()
async def sleep_forever():
await asyncio.sleep(float("inf"))
underlying.read.side_effect = sleep_forever
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_nack_calls_ack(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
nack_handler,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
def on_nack(nacked: PubsubMessage, ack: Callable[[], None]):
assert nacked.message_id == "1"
ack()
nack_handler.on_nack.side_effect = on_nack
read.nack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(1)])
async def test_handle_reset(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message_1 = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message_1
read_1: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
assert read_1.message_id == "1"
await subscriber.handle_reset()
ack_set_tracker.clear_and_commit.assert_called_once()
# After reset, flow control tokens of unacked messages are refilled,
# but offset not committed.
read_1.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
underlying.allow_flow.assert_has_calls(
[
call(FlowControlRequest(allowed_messages=1000, allowed_bytes=1000,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=5,)),
]
)
ack_set_tracker.ack.assert_has_calls([])
message_2 = SequencedMessage(cursor=Cursor(offset=2), size_bytes=10)
underlying.read.return_value = message_2
read_2: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1), call(2)])
assert read_2.message_id == "2"
read_2.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
underlying.allow_flow.assert_has_calls(
[
call(FlowControlRequest(allowed_messages=1000, allowed_bytes=1000,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=5,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=10,)),
]
)
ack_set_tracker.ack.assert_has_calls([call(2)])
|
apache-2.0
|
jbedorf/tensorflow
|
tensorflow/contrib/rnn/python/ops/fused_rnn_cell.py
|
15
|
6099
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing fused RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
@six.add_metaclass(abc.ABCMeta)
class FusedRNNCell(object):
"""Abstract object representing a fused RNN cell.
A fused RNN cell represents the entire RNN expanded over the time
dimension. In effect, this represents an entire recurrent network.
Unlike RNN cells which are subclasses of `rnn_cell.RNNCell`, a `FusedRNNCell`
operates on the entire time sequence at once, by putting the loop over time
inside the cell. This usually leads to much more efficient, but more complex
and less flexible implementations.
Every `FusedRNNCell` must implement `__call__` with the following signature.
"""
@abc.abstractmethod
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this fused RNN on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`
or a list of `time_len` tensors of shape `[batch_size x input_size]`.
initial_state: either a tensor with shape `[batch_size x state_size]`
or a tuple with shapes `[batch_size x s] for s in state_size`, if the
cell takes tuples. If this is not provided, the cell is expected to
create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)`.
Defaults to `time_len` for each element.
scope: `VariableScope` or `string` for the created subgraph; defaults to
class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len x batch_size x output_size]`
or a list of `time_len` tensors of shape `[batch_size x output_size]`,
to match the type of the `inputs`.
- Final state: Either a single `2-D` tensor, or a tuple of tensors
matching the arity and shapes of `initial_state`.
"""
pass
class FusedRNNCellAdaptor(FusedRNNCell):
"""This is an adaptor for RNNCell classes to be used with `FusedRNNCell`."""
def __init__(self, cell, use_dynamic_rnn=False):
"""Initialize the adaptor.
Args:
cell: an instance of a subclass of a `rnn_cell.RNNCell`.
use_dynamic_rnn: whether to use dynamic (or static) RNN.
"""
self._cell = cell
self._use_dynamic_rnn = use_dynamic_rnn
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
is_list = isinstance(inputs, list)
if self._use_dynamic_rnn:
if is_list:
inputs = array_ops.stack(inputs)
outputs, state = rnn.dynamic_rnn(
self._cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=dtype,
time_major=True,
scope=scope)
if is_list:
# Convert outputs back to list
outputs = array_ops.unstack(outputs)
else: # non-dynamic rnn
if not is_list:
inputs = array_ops.unstack(inputs)
outputs, state = rnn.static_rnn(
self._cell,
inputs,
initial_state=initial_state,
dtype=dtype,
sequence_length=sequence_length,
scope=scope)
if not is_list:
# Convert outputs back to tensor
outputs = array_ops.stack(outputs)
return outputs, state
class TimeReversedFusedRNN(FusedRNNCell):
"""This is an adaptor to time-reverse a FusedRNNCell.
For example,
```python
cell = tf.contrib.rnn.BasicRNNCell(10)
fw_lstm = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
bw_lstm = tf.contrib.rnn.TimeReversedFusedRNN(fw_lstm)
fw_out, fw_state = fw_lstm(inputs)
bw_out, bw_state = bw_lstm(inputs)
```
"""
def __init__(self, cell):
self._cell = cell
def _reverse(self, t, lengths):
"""Time reverse the provided tensor or list of tensors.
Assumes the top dimension is the time dimension.
Args:
t: 3D tensor or list of 2D tensors to be reversed
lengths: 1D tensor of lengths, or `None`
Returns:
A reversed tensor or list of tensors
"""
if isinstance(t, list):
return list(reversed(t))
else:
if lengths is None:
return array_ops.reverse_v2(t, [0])
else:
return array_ops.reverse_sequence(t, lengths, 0, 1)
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
inputs = self._reverse(inputs, sequence_length)
outputs, state = self._cell(
inputs,
initial_state=initial_state,
dtype=dtype,
sequence_length=sequence_length,
scope=scope)
outputs = self._reverse(outputs, sequence_length)
return outputs, state
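# A minimal usage sketch (not part of the original module). It follows the
# class docstrings above and assumes `import tensorflow as tf`; the cell size,
# input shape and placeholder below are illustrative assumptions only:
#
#   cell = tf.contrib.rnn.BasicRNNCell(10)
#   fused = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
#   inputs = tf.placeholder(tf.float32, [20, 32, 8])  # time x batch x input
#   outputs, final_state = fused(inputs, dtype=tf.float32)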
|
apache-2.0
|
Alidron/demo-nao
|
alidron-env/lib/python2.7/site-packages/coverage/test_helpers.py
|
45
|
11036
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Mixin classes to help make good tests."""
import atexit
import collections
import contextlib
import os
import random
import shutil
import sys
import tempfile
import textwrap
from coverage.backunittest import TestCase
from coverage.backward import StringIO, to_bytes
class Tee(object):
"""A file-like that writes to all the file-likes it has."""
def __init__(self, *files):
"""Make a Tee that writes to all the files in `files.`"""
self._files = files
if hasattr(files[0], "encoding"):
self.encoding = files[0].encoding
def write(self, data):
"""Write `data` to all the files."""
for f in self._files:
f.write(data)
def flush(self):
"""Flush the data on all the files."""
for f in self._files:
f.flush()
if 0:
# Use this if you need to use a debugger, though it makes some tests
# fail, I'm not sure why...
def __getattr__(self, name):
return getattr(self._files[0], name)
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@contextlib.contextmanager
def saved_sys_path():
"""Save sys.path, and restore it later."""
old_syspath = sys.path[:]
try:
yield
finally:
sys.path = old_syspath
def setup_with_context_manager(testcase, cm):
"""Use a contextmanager to setUp a test case.
If you have a context manager you like::
with ctxmgr(a, b, c) as v:
# do something with v
and you want to have that effect for a test case, call this function from
your setUp, and it will start the context manager for your test, and end it
when the test is done::
def setUp(self):
self.v = setup_with_context_manager(self, ctxmgr(a, b, c))
def test_foo(self):
# do something with self.v
"""
val = cm.__enter__()
testcase.addCleanup(cm.__exit__, None, None, None)
return val
class ModuleAwareMixin(TestCase):
"""A test case mixin that isolates changes to sys.modules."""
def setUp(self):
super(ModuleAwareMixin, self).setUp()
# Record sys.modules here so we can restore it in cleanup_modules.
self.old_modules = list(sys.modules)
self.addCleanup(self.cleanup_modules)
def cleanup_modules(self):
"""Remove any new modules imported during the test run.
This lets us import the same source files for more than one test.
"""
for m in [m for m in sys.modules if m not in self.old_modules]:
del sys.modules[m]
class SysPathAwareMixin(TestCase):
"""A test case mixin that isolates changes to sys.path."""
def setUp(self):
super(SysPathAwareMixin, self).setUp()
setup_with_context_manager(self, saved_sys_path())
class EnvironmentAwareMixin(TestCase):
"""A test case mixin that isolates changes to the environment."""
def setUp(self):
super(EnvironmentAwareMixin, self).setUp()
# Record environment variables that we changed with set_environ.
self.environ_undos = {}
self.addCleanup(self.cleanup_environ)
def set_environ(self, name, value):
"""Set an environment variable `name` to be `value`.
The environment variable is set, and record is kept that it was set,
so that `cleanup_environ` can restore its original value.
"""
if name not in self.environ_undos:
self.environ_undos[name] = os.environ.get(name)
os.environ[name] = value
def cleanup_environ(self):
"""Undo all the changes made by `set_environ`."""
for name, value in self.environ_undos.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = value
class StdStreamCapturingMixin(TestCase):
"""A test case mixin that captures stdout and stderr."""
def setUp(self):
super(StdStreamCapturingMixin, self).setUp()
# Capture stdout and stderr so we can examine them in tests.
# nose keeps stdout from littering the screen, so we can safely Tee it,
# but it doesn't capture stderr, so we don't want to Tee stderr to the
# real stderr, since it will interfere with our nice field of dots.
self.old_stdout = sys.stdout
self.captured_stdout = StringIO()
sys.stdout = Tee(sys.stdout, self.captured_stdout)
self.old_stderr = sys.stderr
self.captured_stderr = StringIO()
sys.stderr = self.captured_stderr
self.addCleanup(self.cleanup_std_streams)
def cleanup_std_streams(self):
"""Restore stdout and stderr."""
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
def stdout(self):
"""Return the data written to stdout during the test."""
return self.captured_stdout.getvalue()
def stderr(self):
"""Return the data written to stderr during the test."""
return self.captured_stderr.getvalue()
class TempDirMixin(SysPathAwareMixin, ModuleAwareMixin, TestCase):
"""A test case mixin that creates a temp directory and files in it.
Includes SysPathAwareMixin and ModuleAwareMixin, because making and using
temp directories like this will also need that kind of isolation.
"""
# Our own setting: most of these tests run in their own temp directory.
# Set this to False in your subclass if you don't want a temp directory
# created.
run_in_temp_dir = True
# Set this if you aren't creating any files with make_file, but still want
# the temp directory. This will stop the test behavior checker from
# complaining.
no_files_in_temp_dir = False
def setUp(self):
super(TempDirMixin, self).setUp()
if self.run_in_temp_dir:
# Create a temporary directory.
self.temp_dir = self.make_temp_dir("test_cover")
self.chdir(self.temp_dir)
# Modules should be importable from this temp directory. We don't
# use '' because we make lots of different temp directories and
# nose's caching importer can get confused. The full path prevents
# problems.
sys.path.insert(0, os.getcwd())
class_behavior = self.class_behavior()
class_behavior.tests += 1
class_behavior.temp_dir = self.run_in_temp_dir
class_behavior.no_files_ok = self.no_files_in_temp_dir
self.addCleanup(self.check_behavior)
def make_temp_dir(self, slug="test_cover"):
"""Make a temp directory that is cleaned up when the test is done."""
name = "%s_%08d" % (slug, random.randint(0, 99999999))
temp_dir = os.path.join(tempfile.gettempdir(), name)
os.makedirs(temp_dir)
self.addCleanup(shutil.rmtree, temp_dir)
return temp_dir
def chdir(self, new_dir):
"""Change directory, and change back when the test is done."""
old_dir = os.getcwd()
os.chdir(new_dir)
self.addCleanup(os.chdir, old_dir)
def check_behavior(self):
"""Check that we did the right things."""
class_behavior = self.class_behavior()
if class_behavior.test_method_made_any_files:
class_behavior.tests_making_files += 1
def make_file(self, filename, text="", newline=None):
"""Create a file for testing.
`filename` is the relative path to the file, including directories if
desired, which will be created if need be.
`text` is the content to create in the file, a native string (bytes in
Python 2, unicode in Python 3).
If `newline` is provided, it is a string that will be used as the line
endings in the created file, otherwise the line endings are as provided
in `text`.
Returns `filename`.
"""
# Tests that call `make_file` should be run in a temp environment.
assert self.run_in_temp_dir
self.class_behavior().test_method_made_any_files = True
text = textwrap.dedent(text)
if newline:
text = text.replace("\n", newline)
# Make sure the directories are available.
dirs, _ = os.path.split(filename)
if dirs and not os.path.exists(dirs):
os.makedirs(dirs)
# Create the file.
with open(filename, 'wb') as f:
f.write(to_bytes(text))
return filename
# We run some tests in temporary directories, because they may need to make
# files for the tests. But this is expensive, so we can change per-class
# whether a temp directory is used or not. It's easy to forget to set that
# option properly, so we track information about what the tests did, and
# then report at the end of the process on test classes that were set
# wrong.
class ClassBehavior(object):
"""A value object to store per-class."""
def __init__(self):
self.tests = 0
self.skipped = 0
self.temp_dir = True
self.no_files_ok = False
self.tests_making_files = 0
self.test_method_made_any_files = False
# Map from class to info about how it ran.
class_behaviors = collections.defaultdict(ClassBehavior)
@classmethod
def report_on_class_behavior(cls):
"""Called at process exit to report on class behavior."""
for test_class, behavior in cls.class_behaviors.items():
bad = ""
if behavior.tests <= behavior.skipped:
bad = ""
elif behavior.temp_dir and behavior.tests_making_files == 0:
if not behavior.no_files_ok:
bad = "Inefficient"
elif not behavior.temp_dir and behavior.tests_making_files > 0:
bad = "Unsafe"
if bad:
if behavior.temp_dir:
where = "in a temp directory"
else:
where = "without a temp directory"
print(
"%s: %s ran %d tests, %d made files %s" % (
bad,
test_class.__name__,
behavior.tests,
behavior.tests_making_files,
where,
)
)
def class_behavior(self):
"""Get the ClassBehavior instance for this test."""
return self.class_behaviors[self.__class__]
# When the process ends, find out about bad classes.
atexit.register(TempDirMixin.report_on_class_behavior)
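# A minimal usage sketch (not part of the original module); the test class and
# file contents are hypothetical:
#
#   class FileMakingTest(TempDirMixin):
#       def test_make_file(self):
#           path = self.make_file("pkg/mod.py", "x = 1\n")
#           assert os.path.exists(path)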
|
mpl-2.0
|
cyberark-bizdev/ansible
|
lib/ansible/modules/cloud/docker/docker_volume.py
|
45
|
7796
|
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = u'''
module: docker_volume
version_added: "2.4"
short_description: Manage Docker volumes
description:
- Create/remove Docker volumes.
- Performs largely the same function as the "docker volume" CLI subcommand.
options:
name:
description:
- Name of the volume to operate on.
required: true
aliases:
- volume_name
driver:
description:
- Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
default: local
driver_options:
description:
- "Dictionary of volume settings. Consult docker docs for valid options and values:
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
labels:
description:
- List of labels to set for the volume
force:
description:
- With state C(present) causes the volume to be deleted and recreated if the volume already
        exists and the driver, driver options or labels differ. This will cause any data in the existing
volume to be lost.
type: bool
default: 'no'
state:
description:
- C(absent) deletes the volume.
- C(present) creates the volume, if it does not already exist.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- Alex Grönholm (@agronholm)
requirements:
- "python >= 2.6"
- "docker-py >= 1.10.0"
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a volume
docker_volume:
name: volume_one
- name: Remove a volume
docker_volume:
name: volume_one
state: absent
- name: Create a volume with options
docker_volume:
name: volume_two
driver_options:
type: btrfs
device: /dev/sda2
'''
RETURN = '''
facts:
description: Volume inspection results for the affected volume.
returned: success
type: dict
sample: {}
'''
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker
pass
from ansible.module_utils.docker_common import DockerBaseClass, AnsibleDockerClient
from ansible.module_utils.six import iteritems, text_type
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.volume_name = None
self.driver = None
self.driver_options = None
self.labels = None
self.force = None
self.debug = None
for key, value in iteritems(client.module.params):
setattr(self, key, value)
class DockerVolumeManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.existing_volume = self.get_existing_volume()
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
def get_existing_volume(self):
try:
volumes = self.client.volumes()
except APIError as e:
self.client.fail(text_type(e))
if volumes[u'Volumes'] is None:
return None
for volume in volumes[u'Volumes']:
if volume['Name'] == self.parameters.volume_name:
return volume
return None
def has_different_config(self):
"""
Return the list of differences between the current parameters and the existing volume.
:return: list of options that differ
"""
differences = []
if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
differences.append('driver')
if self.parameters.driver_options:
if not self.existing_volume.get('Options'):
differences.append('driver_options')
else:
for key, value in iteritems(self.parameters.driver_options):
if (not self.existing_volume['Options'].get(key) or
value != self.existing_volume['Options'][key]):
differences.append('driver_options.%s' % key)
if self.parameters.labels:
            existing_labels = self.existing_volume.get('Labels') or {}
all_labels = set(self.parameters.labels) | set(existing_labels)
for label in all_labels:
if existing_labels.get(label) != self.parameters.labels.get(label):
differences.append('labels.%s' % label)
return differences
def create_volume(self):
if not self.existing_volume:
if not self.check_mode:
try:
resp = self.client.create_volume(self.parameters.volume_name,
driver=self.parameters.driver,
driver_opts=self.parameters.driver_options,
labels=self.parameters.labels)
self.existing_volume = self.client.inspect_volume(resp['Name'])
except APIError as e:
self.client.fail(text_type(e))
self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
self.results['changed'] = True
def remove_volume(self):
if self.existing_volume:
if not self.check_mode:
try:
self.client.remove_volume(self.parameters.volume_name)
except APIError as e:
self.client.fail(text_type(e))
self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
self.results['changed'] = True
def present(self):
differences = []
if self.existing_volume:
differences = self.has_different_config()
if differences and self.parameters.force:
self.remove_volume()
self.existing_volume = None
self.create_volume()
if self.diff or self.check_mode or self.parameters.debug:
self.results['diff'] = differences
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
self.results['ansible_facts'] = {u'docker_volume': self.get_existing_volume()}
def absent(self):
self.remove_volume()
def main():
argument_spec = dict(
volume_name=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='local'),
driver_options=dict(type='dict', default={}),
labels=dict(type='list'),
force=dict(type='bool', default=False),
debug=dict(type='bool', default=False)
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True
)
cm = DockerVolumeManager(client)
client.module.exit_json(**cm.results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
jxta/cc
|
vendor/python-gflags/gflags.py
|
4
|
84466
|
#!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the
command line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. As we add new flags, we may
add new XML elements. Hence, make sure your parser
does not crash when it encounters new XML elements.
--flagfile=foo read flags from foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
import gflags
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead to a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module):
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in
addition to the special flags --help and --helpshort).
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
"""
import cgi
import getopt
import os
import re
import string
import sys
# Are we running at least python 2.2?
try:
if tuple(sys.version_info[:3]) < (2,2,0):
raise NotImplementedError("requires python 2.2.0 or later")
except AttributeError: # a very old python, that lacks sys.version_info
raise NotImplementedError("requires python 2.2.0 or later")
# If we're not running at least python 2.2.1, define True, False, and bool.
# Thanks, Guido, for the code.
try:
True, False, bool
except NameError:
False = 0
True = 1
def bool(x):
if x:
return True
else:
return False
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModule():
"""Returns the name of the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
module_name = __GetModuleName(sys._getframe(depth).f_globals)
if module_name is not None:
return module_name
raise AssertionError("No module was found")
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
# A DuplicateFlagError conveys more information than a
# DuplicateFlag. Since there are external modules that create
# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
class DuplicateFlagError(DuplicateFlag):
def __init__(self, flagname, flag_values):
self.flagname = flagname
message = "The flag '%s' is defined twice." % self.flagname
flags_by_module = flag_values.FlagsByModuleDict()
for module in flags_by_module:
for flag in flags_by_module[module]:
if flag.name == flagname or flag.short_name == flagname:
message = message + " First from " + module + ","
break
message = message + " Second from " + _GetCallingModule()
DuplicateFlag.__init__(self, message)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an
# UnrecognizedFlag. Since there are external modules that create
# UnrecognizedFlags, the interface to UnrecognizedFlag shouldn't change.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname):
self.flagname = flagname
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
  If the first line does not start with a space, it is left as it is and
  a common space prefix is searched for only in the remaining lines; that
  means the first line will stay untouched. This is especially useful for
  turning doc strings into help texts, because some people prefer to start
  the doc comment right after the opening quotes and then align the
  following lines, while others put the quotes on a separate line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
    # Calculate length of common leading whitespace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in xrange(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespaces into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')
# If the callee does not care about tabs we simply convert them to
# spaces If callee wanted tabs to be single space then we do that
# already here.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
  # Split the text into lines, and split each line with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
    # Spaces found on the line are ignored; they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
        # Add the tabs. If that means adding whitespace, just append it to
        # the line; the rstrip() code will shorten the line down if
        # necessary.
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
      # Add word and shorten it up to the allowed line length. Restart the
      # next line with indent and repeat, or add a space if we're done
      # (word finished). This deals with words that cannot fit on one line
      # (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
    # End of input line. If we have content we finish the line. If the
    # current line is just the indent but we had content during this
    # original line then we need to add an empty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
# Solution: Match new lines between non white space and replace with space.
  doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
return doc
def __GetModuleName(globals_dict):
"""Given a globals dict, returns the name of the module that defines it.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A string (the name of the module) or None (if the module could not
    be identified).
"""
for name, module in sys.modules.iteritems():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
return sys.argv[0]
return name
return None
def _GetMainModule():
"""Returns the name of the module from which execution started."""
for depth in range(1, sys.getrecursionlimit()):
try:
globals_of_main = sys._getframe(depth).f_globals
except ValueError:
return __GetModuleName(globals_of_main)
raise AssertionError("No module was found")
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
      # Each flag with a short name appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
self[flag_name] = flag
def __setitem__(self, name, flag):
"""Registers a new flag variable."""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue(flag)
if not isinstance(name, type("")):
raise FlagsError("Flag name must be a string")
if len(name) == 0:
raise FlagsError("Flag name cannot be empty")
# If running under pychecker, duplicate keys are likely to be
# defined. Disable check for duplicate keys when pycheck'ing.
if (fl.has_key(name) and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(name, self)
short_name = flag.short_name
if short_name is not None:
if (fl.has_key(short_name) and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(short_name, self)
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
return value
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
Note: this is non trivial: in addition to its normal name, a flag
may have a short name too. In self.FlagDict(), both the normal and
the short name are mapped to the same flag object. E.g., calling
only "del FLAGS.short_name" is not unregistering the corresponding
Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
flags.DEFINE_integer('foo', 1, 'Integer flag.')
del flags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
      # we delete the occurrences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
      # while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
fl[name].SetDefault(value)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return self.FlagDict().iterkeys()
def __call__(self, argv):
"""Parses flags from argv; stores parsed flags into this FlagValues object.
All unparsed arguments are returned. Flags are parsed using the GNU
Program Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
Args:
argv: argument list. Can be of any type that may be converted to a list.
Returns:
The list of arguments not parsed as options, including argv[0]
Raises:
FlagsError: on any parsing error
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
    # This pre-parses the argv list for --flagfile=<> options.
argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
original_argv = list(argv) # list() makes a copy
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
      # Replace all occurrences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options
# and long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon
# if it takes an argument. Long options are stored in an array of
# strings. Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
longopts.append('undefok=')
undefok_flags = []
# In case --undefok is specified, loop to pick up unrecognized
# options one by one.
unrecognized_opts = []
args = argv[1:]
while True:
try:
if self.__dict__['__use_gnu_getopt']:
optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
else:
optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
break
except getopt.GetoptError, e:
if not e.opt or e.opt in fl:
# Not an unrecognized option, reraise the exception as a FlagsError
raise FlagsError(e)
# Handle an unrecognized option.
unrecognized_opts.append(e.opt)
# Remove offender from args and try again
for arg_index in range(len(args)):
if ((args[arg_index] == '--' + e.opt) or
(args[arg_index] == '-' + e.opt) or
args[arg_index].startswith('--' + e.opt + '=')):
args = args[0:arg_index] + args[arg_index+1:]
break
else:
# We should have found the option, so we don't expect to get
# here. We could assert, but raising the original exception
# might work better.
raise FlagsError(e)
for name, arg in optlist:
if name == '--undefok':
flag_names = arg.split(',')
undefok_flags.extend(flag_names)
# For boolean flags, if --undefok=boolflag is specified, then we should
# also accept --noboolflag, in addition to --boolflag.
# Since we don't know the type of the undefok'd flag, this will affect
# non-boolean flags as well.
# NOTE: You shouldn't use --undefok=noboolflag, because then we will
# accept --nonoboolflag here. We are choosing not to do the conversion
# from noboolflag -> boolflag because of the ambiguity that flag names
# can start with 'no'.
undefok_flags.extend('no' + name for name in flag_names)
continue
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if fl.has_key(name):
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
# If there were unrecognized options, raise an exception unless
# the options were named via --undefok.
for opt in unrecognized_opts:
if opt not in undefok_flags:
raise UnrecognizedFlagError(opt)
if unparsed_args:
if self.__dict__['__use_gnu_getopt']:
# if using gnu_getopt just return the program name + remainder of argv.
return argv[:1] + unparsed_args
else:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
return argv[:1] + original_argv[-len(unparsed_args):]
else:
return argv[:1]
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return self.FlagDict().keys()
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = flags_by_module.keys()
modules.sort()
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
fl = self.FlagDict()
special_fl = _SPECIAL_FLAGS.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
# a different flag is using this name now
continue
# only print help once
if flagset.has_key(flag): continue
flagset[flag] = 1
flaghelp = ""
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
flaghelp = TextWrap(flaghelp, indent=prefix+" ",
firstline_indent=prefix)
if flag.default_as_str:
flaghelp += "\n"
flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
indent=prefix+" ")
if flag.parser.syntactic_help:
flaghelp += "\n"
flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
indent=prefix+" ")
output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
    line_list = [] # All lines from flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
print e_msg
print 'ERROR:: Unable to open flagfile: %s' % (filename)
return flag_line_list
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
# We do a little safety check for reparsing a file we've already done.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
% sub_filename)
else:
# Any line that's not a comment or a nested flagfile should get
        # copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: A list of strings, usually sys.argv[1:], which may contain one or
more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: If False, --flagfile parsing obeys normal flag semantics.
If True, --flagfile parsing instead follows gnu_getopt semantics.
*** WARNING *** force_gnu=False may become the future default!
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
References: Global gflags.FLAG class instance.
This function should be called before the normal FLAGS(argv) call.
This function scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list between the
first item of the list and any subsequent items in the list.
Note that your application's flags are still defined the usual way
using gflags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> Flags from the command line argv _should_ always take precedence!
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be processed after the parent flag file is done.
--> For duplicate flags, first one we hit should "win".
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
parsed_file_list = []
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self.__IsFlagFileDirective(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise IllegalFlagValue('--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self.ExtractFilename(current_arg)
new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list)
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and not self.__dict__['__use_gnu_getopt']:
break
if rest_of_args:
new_argv.extend(rest_of_args)
return new_argv
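  # Illustrative sketch (not part of the original class): a typical use of
  # ReadFlagsFromFiles before the normal FLAGS(argv) call.  The file name
  # 'flags.cfg' referenced via --flagfile is hypothetical.
  #
  #   argv = FLAGS.ReadFlagsFromFiles(sys.argv[1:])
  #   argv = FLAGS([sys.argv[0]] + argv)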
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ function
CommandlineFlagsIntoString from google3/base/commandlineflags.cc.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ version of
AppendFlagsIntoFile from google3/base/commandlineflags.cc.
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
def WriteHelpInXMLFormat(self, outfile=None):
"""Outputs flag documentation in XML format.
NOTE: We use element names that are consistent with those used by
the C++ command-line flag library, from
google3/base/commandlineflags_reporting.cc. We also use a few new
elements (e.g., <key>), but we do not interfere / overlap with
existing XML elements used by the C++ library. Please maintain this
consistency.
Args:
outfile: File object we write to. Default None means sys.stdout.
"""
outfile = outfile or sys.stdout
outfile.write('<?xml version=\"1.0\"?>\n')
outfile.write('<AllFlags>\n')
indent = ' '
_WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
indent)
usage_doc = sys.modules['__main__'].__doc__
if not usage_doc:
usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
else:
usage_doc = usage_doc.replace('%s', sys.argv[0])
_WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
# Get list of key flags for the main module.
key_flags = self._GetKeyFlagsForModule(_GetMainModule())
# Sort flags by declaring module name and next by flag name.
flags_by_module = self.FlagsByModuleDict()
all_module_names = list(flags_by_module.keys())
all_module_names.sort()
for module_name in all_module_names:
flag_list = [(f.name, f) for f in flags_by_module[module_name]]
flag_list.sort()
for unused_flag_name, flag in flag_list:
is_key = flag in key_flags
flag.WriteInfoInXMLFormat(outfile, module_name,
is_key=is_key, indent=indent)
outfile.write('</AllFlags>\n')
outfile.flush()
# end of FlagValues definition
# The global FlagValues instance
FLAGS = FlagValues()
def _MakeXMLSafe(s):
"""Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
s = cgi.escape(s) # Escape <, >, and &
# Remove characters that cannot appear in an XML 1.0 document
# (http://www.w3.org/TR/REC-xml/#charsets).
#
# NOTE: if there are problems with current solution, one may move to
# XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
"""Writes a simple XML element.
Args:
outfile: File object we write the XML element to.
name: A string, the name of XML element.
value: A Python object, whose string representation will be used
as the value of the XML element.
indent: A string, prepended to each line of generated output.
"""
value_str = str(value)
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
value_str = value_str.lower()
outfile.write('%s<%s>%s</%s>\n' %
(indent, name, _MakeXMLSafe(value_str), name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.SetDefault(default)
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(str(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
if value is None and self.allow_override:
raise DuplicateFlag(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
_WriteSimpleXMLElement(outfile, 'default', self.default, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
"""Writes extra info about this flag, in XML format.
"Extra" means "not already printed by WriteInfoInXMLFormat above."
Args:
outfile: File object we write to.
indent: A string that is prepended to each generated line.
"""
# Usually, the parser knows the extra details about the flag, so
# we just forward the call to it.
self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class ArgumentParser:
"""Base class used to parse and convert arguments.
The Parse() method checks to make sure that the string argument is a
legal value and convert it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
"""
syntactic_help = ""
def Parse(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
def Type(self):
return 'string'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
pass
class ArgumentSerializer:
"""Base class for generating string representations of a flag value."""
def Serialize(self, value):
return str(value)
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def Serialize(self, value):
return self.list_sep.join([str(x) for x in value])
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
**args):
"""Registers a generic Flag object.
NOTE: in the docstrings of all DEFINE* functions, "registers" is short
for "creates a new flag and registers it".
Auxiliary function: clients should use the specialized DEFINE_<type>
function instead.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object the flag will be registered with.
serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
"""
# copying the reference to flag_values prevents pychecker warnings
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if isinstance(flag_values, FlagValues):
# Regarding the above isinstance test: some users pass funny
# values of flag_values (e.g., {}) in order to avoid the flag
# registration (in the past, there used to be a flag_values ==
# FLAGS test here) and redefine flags with the same name (e.g.,
# debug). To avoid breaking their code, we perform the
# registration only if flag_values is a real FlagValues object.
flag_values._RegisterFlagByModule(_GetCallingModule(), flag)
def _InternalDeclareKeyFlags(flag_names, flag_values=FLAGS):
"""Declares a flag as key for the calling module.
Internal function. User code should call DECLARE_key_flag or
ADOPT_module_key_flags instead.
Args:
flag_names: A list of strings that are names of already-registered
Flag objects.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
UnrecognizedFlagError: when we refer to a flag that was not
defined yet.
"""
module = _GetCallingModule()
for flag_name in flag_names:
if flag_name not in flag_values:
raise UnrecognizedFlagError(flag_name)
flag = flag_values.FlagDict()[flag_name]
flag_values._RegisterKeyFlagForModule(module, flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
    flags.DECLARE_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden.
"""
_InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: A module object.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
FlagsError: When given an argument that is a module name (a
string), instead of a module object.
"""
# NOTE(salcianu): an even better test would be if not
# isinstance(module, types.ModuleType) but I didn't want to import
# types for such a tiny use.
if isinstance(module, str):
raise FlagsError('Received module name %s; expected a module object.'
% module)
_InternalDeclareKeyFlags(
[f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be any string."""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
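# Illustrative sketch (not part of the original module): defining and reading
# a string flag.  The flag name 'name' and the main() function are
# hypothetical.
#
#   DEFINE_string('name', 'world', 'Name to greet.')
#
#   def main(argv):
#     argv = FLAGS(argv)          # parse flags; returns remaining arguments
#     print 'Hello, %s' % FLAGS.name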
#
# BOOLEAN FLAGS
#
# and the special HELP flags.
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def Convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if type(argument) == str:
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def Parse(self, argument):
val = self.Convert(argument)
return val
def Type(self):
return 'bool'
class BooleanFlag(Flag):
"""Basic boolean flag.
Boolean flags do not take any arguments, and their value is either
True (1) or False (0). The false value is specified on the command
line by prepending the word 'no' to either the long or the short flag
name.
For example, if a Boolean flag was created whose long name was
'update' and whose short name was 'x', then this flag could be
explicitly unset through either --noupdate or --nox.
"""
def __init__(self, name, default, help, short_name=None, **args):
p = BooleanParser()
Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
"""Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
"""
DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
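# Illustrative sketch (hypothetical flag name): boolean flags take no argument
# on the command line; the 'no' prefix unsets them, per the BooleanFlag
# docstring above.
#
#   DEFINE_boolean('debug', False, 'Run in debug mode.')
#
#   # --debug      ->  FLAGS.debug == True
#   # --nodebug    ->  FLAGS.debug == False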
class HelpFlag(BooleanFlag):
"""
HelpFlag is a special boolean flag that prints usage information and
raises a SystemExit exception if it is ever found in the command
line arguments. Note this is called with allow_override=1, so other
apps can define their own --help flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "help", 0, "show this help",
short_name="?", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = str(FLAGS)
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
class HelpXMLFlag(BooleanFlag):
"""Similar to HelpFlag, but generates output in XML format."""
def __init__(self):
BooleanFlag.__init__(self, 'helpxml', False,
'like --help, but generates XML output',
allow_override=1)
def Parse(self, arg):
if arg:
FLAGS.WriteHelpInXMLFormat(sys.stdout)
sys.exit(1)
class HelpshortFlag(BooleanFlag):
"""
HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
if it is ever found in the command line arguments. Note this is
called with allow_override=1, so other apps can define their own
--helpshort flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "helpshort", 0,
"show usage only for this module", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = FLAGS.MainModuleHelp()
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
#
# FLOAT FLAGS
#
class FloatParser(ArgumentParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound != None and upper_bound != None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = "a positive %s" % self.number_name
elif upper_bound == -1:
sh = "a negative %s" % self.number_name
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound != None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound != None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def Parse(self, argument):
val = self.Convert(argument)
if ((self.lower_bound != None and val < self.lower_bound) or
(self.upper_bound != None and val > self.upper_bound)):
raise ValueError("%s is not %s" % (val, self.syntactic_help))
return val
def Type(self):
return 'float'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
if self.lower_bound is not None:
_WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
if self.upper_bound is not None:
_WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be a float.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# INTEGER FLAGS
#
class IntegerParser(FloatParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "an"
number_name = "integer"
syntactic_help = " ".join((number_article, number_name))
def Convert(self, argument):
__pychecker__ = 'no-returnvalues'
if type(argument) == str:
base = 10
if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
base = 16
try:
return int(argument, base)
# ValueError is thrown when argument is a string, and overflows an int.
except ValueError:
return long(argument, base)
else:
try:
return int(argument)
# OverflowError is thrown when argument is numeric, and overflows an int.
except OverflowError:
return long(argument)
def Type(self):
return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be an integer.
  If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
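# Illustrative sketch (hypothetical flag name): an integer flag with bounds.
# A value outside [1, 65535] makes the parser raise ValueError, which
# Flag.Parse recasts as IllegalFlagValue during FLAGS(argv).
#
#   DEFINE_integer('port', 8080, 'Port to listen on.',
#                  lower_bound=1, upper_bound=65535)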
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None):
self.enum_values = enum_values
def Parse(self, argument):
if self.enum_values and argument not in self.enum_values:
raise ValueError("value should be one of <%s>" %
"|".join(self.enum_values))
return argument
def Type(self):
return 'string enum'
class EnumFlag(Flag):
"""Basic enum flag; its value can be any string from list of enum_values."""
def __init__(self, name, default, help, enum_values=None,
short_name=None, **args):
enum_values = enum_values or []
p = EnumParser(enum_values)
g = ArgumentSerializer()
Flag.__init__(self, p, g, name, default, help, short_name, **args)
if not self.help: self.help = "an enum string"
self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
for enum_value in self.parser.enum_values:
_WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
**args):
"""Registers a flag whose value can be any string from enum_values."""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values)
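# Illustrative sketch (hypothetical flag name): an enum flag restricted to a
# fixed set of string values; anything else is rejected by EnumParser.Parse.
#
#   DEFINE_enum('color', 'red', ['red', 'green', 'blue'], 'Favorite color.')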
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
self._token = token
self._name = name
self.syntactic_help = "a %s separated list" % self._name
def Parse(self, argument):
if isinstance(argument, list):
return argument
elif argument == '':
return []
else:
return [s.strip() for s in argument.split(self._token)]
def Type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
_WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, None, 'whitespace')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
separators = list(string.whitespace)
separators.sort()
for ws_char in string.whitespace:
_WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings."""
parser = ListParser()
serializer = ListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a whitespace-separated list of strings.
Any whitespace can be used as a separator.
"""
parser = WhitespaceSeparatedListParser()
serializer = ListSerializer(' ')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
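# Illustrative sketch (hypothetical flag names): list flags split a single
# command-line argument into a Python list of strings.
#
#   DEFINE_list('languages', 'en,fr', 'Comma-separated language codes.')
#   DEFINE_spaceseplist('dirs', 'src test', 'Whitespace-separated directories.')
#
#   # --languages=en,de,ja   ->  FLAGS.languages == ['en', 'de', 'ja']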
#
# MULTI FLAGS
#
class MultiFlag(Flag):
"""A flag that can appear multiple time on the command-line.
The value of such a flag is a list that contains the individual values
from all the appearances of that flag on the command-line.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* The default value may be either a single value or a list of values.
A single value is interpreted as the [value] singleton list.
* The value of the flag is always a list, even if the option was
only supplied once, and even if the default value is a single
value
"""
def __init__(self, *args, **kwargs):
Flag.__init__(self, *args, **kwargs)
self.help += ';\n repeat this option to specify a list of values'
def Parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values
def Serialize(self):
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
if self.value is None:
return ''
s = ''
multi_value = self.value
for self.value in multi_value:
if s: s += ' '
s += Flag.Serialize(self)
self.value = multi_value
return s
def Type(self):
return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
**args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of any strings.
Use the flag on the command line multiple times to place multiple
string values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary integers.
Use the flag on the command line multiple times to place multiple
integer values into the list. The 'default' may be a single integer
(which will be converted into a single-element list) or a list of
integers.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
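# Illustrative sketch (hypothetical flag name): a multi flag accumulates one
# value per occurrence of the option, as described in the MultiFlag docstring.
#
#   DEFINE_multistring('define', [], 'Preprocessor-style definition.')
#
#   # --define=FOO --define=BAR   ->  FLAGS.define == ['FOO', 'BAR']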
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())
# Define special flags here so that help may be generated for them.
_SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', "",
"Insert flag definitions from the given file into the command line.",
_SPECIAL_FLAGS)
DEFINE_string(
'undefok', "",
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
"with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
|
apache-2.0
|
michaljach/ember-cli-xpagination
|
node_modules/ember-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
292
|
114315
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
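# Illustrative sketch (not from the original source): CheckedEval accepts the
# restricted literal syntax used by .gyp files and rejects repeated keys.
#
#   CheckedEval("{'targets': [{'target_name': 'foo', 'type': 'none'}]}")
#     ->  {'targets': [{'target_name': 'foo', 'type': 'none'}]}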
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
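# Illustrative examples (not part of the original source) of the canonical
# integer form accepted by IsStrCanonicalInt:
#
#   IsStrCanonicalInt('42')    ->  True
#   IsStrCanonicalInt('-7')    ->  True
#   IsStrCanonicalInt('042')   ->  False   (leading zero)
#   IsStrCanonicalInt('1.0')   ->  False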
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
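# Illustrative example (hypothetical command): on win32 the leading 'cat '
# is rewritten to 'type '; on other platforms the command passes through
# untouched.
#   FixupPlatformCommand('cat foo.txt')   # win32     -> 'type foo.txt'
#   FixupPlatformCommand('cat foo.txt')   # elsewhere -> 'cat foo.txt'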
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# must then expect a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
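# Illustrative sketch (hypothetical file and variable names): given
#   'sources': ['a.cc', 'b.cc'],
#   'inputs': ['<|(sources.txt <@(sources))'],
# the items after the file name ('a.cc', 'b.cc') are written one per line
# into sources.txt at gyp time, and the whole expression expands to the
# path of sources.txt relative to the build file.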
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support disabling the caching of
# a command's output so that it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
# an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
# Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
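# Illustrative sketch (hypothetical values): the shape of a 'conditions'
# entry handled here, matching the [cond_expr, true_dict, optional false_dict]
# layout evaluated by EvalCondition below.
#   'conditions': [
#     ['OS=="linux"',
#       {'defines': ['USE_X11']},    # merged when the expression is true
#       {'defines': ['NO_X11']}],    # optional dict, merged when false
#   ],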
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
# Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
# variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
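# Illustrative sketch (hypothetical variable names): how the trailing '%'
# behaves. Each '%'-suffixed key only provides a default.
#   variables = {'use_goma': 0}
#   the_dict  = {'variables': {'use_goma%': 1, 'target_arch%': 'x64'}}
#   LoadVariablesFromVariablesDict(variables, the_dict, None)
#   # variables -> {'use_goma': 0, 'target_arch': 'x64'}
#   # 'use_goma%' is ignored because 'use_goma' was already set.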
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
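# Illustrative example (hypothetical input):
#   Unify(['a', 'b', 'a', 'c', 'b'])  # -> ['a', 'b', 'c']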
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
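# Illustrative example (hypothetical input):
#   Filter(['a', 'b', 'a', 'c'], 'a')  # -> ['b', 'c']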
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
# The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
# target's computed list of link dependencies (including static libraries)
# if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
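# Illustrative examples (hypothetical paths): relocating a relative path
# between build files, plus the pass-through cases for the special leading
# characters listed above.
#   MakePathRelative('a/b.gyp', 'a/c/d.gyp', 'src/x.cc')    # -> 'c/src/x.cc'
#   MakePathRelative('a/b.gyp', 'a/c/d.gyp', '<(foo)/x.cc') # -> '<(foo)/x.cc'
#   MakePathRelative('a/b.gyp', 'a/c/d.gyp', '-lpthread')   # -> '-lpthread'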
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
# Python documentation recommends that objects which do not support hashing
# set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has this        ...the to-list will have this action
# character appended:             applied when receiving the from-list:
#   =                             replace
#   +                             prepend
#   ?                             set, only if to-list does not yet exist
#   (none)                        append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
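# Illustrative examples (hypothetical keys and values): each line shows a
# separate fro dict merged into a to dict where to['defines'] starts as ['A'].
#   'defines':  ['B']   ->  ['A', 'B']   (append)
#   'defines+': ['B']   ->  ['B', 'A']   (prepend)
#   'defines=': ['B']   ->  ['B']        (replace)
#   'defines?': ['B']   ->  ['A']        (kept; the to-list already exists)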
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
# Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
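# Illustrative sketch (hypothetical values, not part of gyp):
#   d = {'sources':  ['a.cc', 'b_win.cc', 'c_mac.cc'],
#        'sources!': ['a.cc'],
#        'sources/': [['exclude', '_(win|mac)\\.cc$'],
#                     ['include', '_mac\\.cc$']]}
#   ProcessListFiltersInDict('example', d)
# leaves d['sources'] == ['c_mac.cc'],
# d['sources_excluded'] == ['a.cc', 'b_win.cc'], and deletes the
# 'sources!' and 'sources/' keys.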
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
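# Illustrative sketch (hypothetical rule, not part of gyp): for a target with
#   'sources': ['parser.y', 'main.cc'] and
#   'rules':   [{'rule_name': 'yacc', 'extension': 'y', ...}]
# ValidateRulesInTarget adds rule['rule_sources'] == ['parser.y'], because only
# parser.y carries the extension that the rule operates on.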
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
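# Illustrative sketch (hypothetical dict, not part of gyp):
#   d = {'msvs_disabled_warnings': [4267, 4996], 'priority': 2}
#   TurnIntIntoStrInDict(d)
#   # d == {'msvs_disabled_warnings': ['4267', '4996'], 'priority': '2'}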
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
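# Illustrative sketch (hypothetical targets, not part of gyp):
#   VerifyNoCollidingTargets(['base/foo.gyp:util', 'base/bar.gyp:util'])
# raises GypError because both targets are named 'util' in directory 'base';
# ['base/foo.gyp:util', 'net/bar.gyp:util'] would pass.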
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
|
mit
|
nugget/home-assistant
|
homeassistant/components/sensor/buienradar.py
|
2
|
22393
|
"""
Support for Buienradar.nl weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.buienradar/
"""
import asyncio
from datetime import datetime, timedelta
import logging
import async_timeout
import aiohttp
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS, CONF_NAME, TEMP_CELSIUS)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
async_track_point_in_utc_time)
from homeassistant.util import dt as dt_util
REQUIREMENTS = ['buienradar==0.91']
_LOGGER = logging.getLogger(__name__)
MEASURED_LABEL = 'Measured'
TIMEFRAME_LABEL = 'Timeframe'
SYMBOL = 'symbol'
# Schedule next call after (minutes):
SCHEDULE_OK = 10
# When an error occurred, new call after (minutes):
SCHEDULE_NOK = 2
# Supported sensor types:
# Key: ['label', unit, icon]
SENSOR_TYPES = {
'stationname': ['Stationname', None, None],
'condition': ['Condition', None, None],
'conditioncode': ['Condition code', None, None],
'conditiondetailed': ['Detailed condition', None, None],
'conditionexact': ['Full condition', None, None],
'symbol': ['Symbol', None, None],
'humidity': ['Humidity', '%', 'mdi:water-percent'],
'temperature': ['Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'groundtemperature': ['Ground temperature', TEMP_CELSIUS,
'mdi:thermometer'],
'windspeed': ['Wind speed', 'm/s', 'mdi:weather-windy'],
'windforce': ['Wind force', 'Bft', 'mdi:weather-windy'],
'winddirection': ['Wind direction', None, 'mdi:compass-outline'],
'windazimuth': ['Wind direction azimuth', '°', 'mdi:compass-outline'],
'pressure': ['Pressure', 'hPa', 'mdi:gauge'],
'visibility': ['Visibility', 'm', None],
'windgust': ['Wind gust', 'm/s', 'mdi:weather-windy'],
'precipitation': ['Precipitation', 'mm/h', 'mdi:weather-pouring'],
'irradiance': ['Irradiance', 'W/m2', 'mdi:sunglasses'],
'precipitation_forecast_average': ['Precipitation forecast average',
'mm/h', 'mdi:weather-pouring'],
'precipitation_forecast_total': ['Precipitation forecast total',
'mm', 'mdi:weather-pouring'],
'temperature_1d': ['Temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_2d': ['Temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_3d': ['Temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_4d': ['Temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_5d': ['Temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_1d': ['Minimum temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_2d': ['Minimum temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_3d': ['Minimum temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_4d': ['Minimum temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_5d': ['Minimum temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'rain_1d': ['Rain 1d', 'mm', 'mdi:weather-pouring'],
'rain_2d': ['Rain 2d', 'mm', 'mdi:weather-pouring'],
'rain_3d': ['Rain 3d', 'mm', 'mdi:weather-pouring'],
'rain_4d': ['Rain 4d', 'mm', 'mdi:weather-pouring'],
'rain_5d': ['Rain 5d', 'mm', 'mdi:weather-pouring'],
'snow_1d': ['Snow 1d', 'cm', 'mdi:snowflake'],
'snow_2d': ['Snow 2d', 'cm', 'mdi:snowflake'],
'snow_3d': ['Snow 3d', 'cm', 'mdi:snowflake'],
'snow_4d': ['Snow 4d', 'cm', 'mdi:snowflake'],
'snow_5d': ['Snow 5d', 'cm', 'mdi:snowflake'],
'rainchance_1d': ['Rainchance 1d', '%', 'mdi:weather-pouring'],
'rainchance_2d': ['Rainchance 2d', '%', 'mdi:weather-pouring'],
'rainchance_3d': ['Rainchance 3d', '%', 'mdi:weather-pouring'],
'rainchance_4d': ['Rainchance 4d', '%', 'mdi:weather-pouring'],
'rainchance_5d': ['Rainchance 5d', '%', 'mdi:weather-pouring'],
'sunchance_1d': ['Sunchance 1d', '%', 'mdi:weather-partlycloudy'],
'sunchance_2d': ['Sunchance 2d', '%', 'mdi:weather-partlycloudy'],
'sunchance_3d': ['Sunchance 3d', '%', 'mdi:weather-partlycloudy'],
'sunchance_4d': ['Sunchance 4d', '%', 'mdi:weather-partlycloudy'],
'sunchance_5d': ['Sunchance 5d', '%', 'mdi:weather-partlycloudy'],
'windforce_1d': ['Wind force 1d', 'Bft', 'mdi:weather-windy'],
'windforce_2d': ['Wind force 2d', 'Bft', 'mdi:weather-windy'],
'windforce_3d': ['Wind force 3d', 'Bft', 'mdi:weather-windy'],
'windforce_4d': ['Wind force 4d', 'Bft', 'mdi:weather-windy'],
'windforce_5d': ['Wind force 5d', 'Bft', 'mdi:weather-windy'],
'condition_1d': ['Condition 1d', None, None],
'condition_2d': ['Condition 2d', None, None],
'condition_3d': ['Condition 3d', None, None],
'condition_4d': ['Condition 4d', None, None],
'condition_5d': ['Condition 5d', None, None],
'conditioncode_1d': ['Condition code 1d', None, None],
'conditioncode_2d': ['Condition code 2d', None, None],
'conditioncode_3d': ['Condition code 3d', None, None],
'conditioncode_4d': ['Condition code 4d', None, None],
'conditioncode_5d': ['Condition code 5d', None, None],
'conditiondetailed_1d': ['Detailed condition 1d', None, None],
'conditiondetailed_2d': ['Detailed condition 2d', None, None],
'conditiondetailed_3d': ['Detailed condition 3d', None, None],
'conditiondetailed_4d': ['Detailed condition 4d', None, None],
'conditiondetailed_5d': ['Detailed condition 5d', None, None],
'conditionexact_1d': ['Full condition 1d', None, None],
'conditionexact_2d': ['Full condition 2d', None, None],
'conditionexact_3d': ['Full condition 3d', None, None],
'conditionexact_4d': ['Full condition 4d', None, None],
'conditionexact_5d': ['Full condition 5d', None, None],
'symbol_1d': ['Symbol 1d', None, None],
'symbol_2d': ['Symbol 2d', None, None],
'symbol_3d': ['Symbol 3d', None, None],
'symbol_4d': ['Symbol 4d', None, None],
'symbol_5d': ['Symbol 5d', None, None],
}
CONF_TIMEFRAME = 'timeframe'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS,
default=['symbol', 'temperature']): vol.All(
cv.ensure_list, vol.Length(min=1),
[vol.In(SENSOR_TYPES.keys())]),
vol.Inclusive(CONF_LATITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.longitude,
vol.Optional(CONF_TIMEFRAME, default=60):
vol.All(vol.Coerce(int), vol.Range(min=5, max=120)),
vol.Optional(CONF_NAME, default='br'): cv.string,
})
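# Example configuration.yaml entry (illustrative sketch: only the keys come
# from the schema above, the values are assumptions):
#
# sensor:
#   - platform: buienradar
#     timeframe: 45
#     monitored_conditions:
#       - symbol
#       - temperature
#       - precipitation_forecast_total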
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Create the buienradar sensor."""
from homeassistant.components.weather.buienradar import DEFAULT_TIMEFRAME
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
timeframe = config.get(CONF_TIMEFRAME, DEFAULT_TIMEFRAME)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in HomeAssistant config")
return False
coordinates = {CONF_LATITUDE: float(latitude),
CONF_LONGITUDE: float(longitude)}
_LOGGER.debug("Initializing buienradar sensor coordinate %s, timeframe %s",
coordinates, timeframe)
dev = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
dev.append(BrSensor(sensor_type, config.get(CONF_NAME),
coordinates))
async_add_entities(dev)
data = BrData(hass, coordinates, timeframe, dev)
# schedule the first update in 1 minute from now:
await data.schedule_update(1)
class BrSensor(Entity):
"""Representation of an Buienradar sensor."""
def __init__(self, sensor_type, client_name, coordinates):
"""Initialize the sensor."""
from buienradar.buienradar import (PRECIPITATION_FORECAST, CONDITION)
self.client_name = client_name
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self._entity_picture = None
self._attribution = None
self._measured = None
self._stationname = None
self._unique_id = self.uid(coordinates)
# All continuous sensors should be forced to be updated
self._force_update = self.type != SYMBOL and \
not self.type.startswith(CONDITION)
if self.type.startswith(PRECIPITATION_FORECAST):
self._timeframe = None
def uid(self, coordinates):
"""Generate a unique id using coordinates and sensor type."""
# The combination of the location, name and sensor type is unique
return "%2.6f%2.6f%s" % (coordinates[CONF_LATITUDE],
coordinates[CONF_LONGITUDE],
self.type)
def load_data(self, data):
"""Load the sensor with relevant data."""
# Find sensor
from buienradar.buienradar import (ATTRIBUTION, CONDITION, CONDCODE,
DETAILED, EXACT, EXACTNL, FORECAST,
IMAGE, MEASURED,
PRECIPITATION_FORECAST, STATIONNAME,
TIMEFRAME)
# Check if we have a new measurement,
# otherwise we do not have to update the sensor
if self._measured == data.get(MEASURED):
return False
self._attribution = data.get(ATTRIBUTION)
self._stationname = data.get(STATIONNAME)
self._measured = data.get(MEASURED)
if self.type.endswith('_1d') or \
self.type.endswith('_2d') or \
self.type.endswith('_3d') or \
self.type.endswith('_4d') or \
self.type.endswith('_5d'):
fcday = 0
if self.type.endswith('_2d'):
fcday = 1
if self.type.endswith('_3d'):
fcday = 2
if self.type.endswith('_4d'):
fcday = 3
if self.type.endswith('_5d'):
fcday = 4
# update all other sensors
if self.type.startswith(SYMBOL) or self.type.startswith(CONDITION):
try:
condition = data.get(FORECAST)[fcday].get(CONDITION)
except IndexError:
_LOGGER.warning("No forecast for fcday=%s...", fcday)
return False
if condition:
new_state = condition.get(CONDITION, None)
if self.type.startswith(SYMBOL):
new_state = condition.get(EXACTNL, None)
if self.type.startswith('conditioncode'):
new_state = condition.get(CONDCODE, None)
if self.type.startswith('conditiondetailed'):
new_state = condition.get(DETAILED, None)
if self.type.startswith('conditionexact'):
new_state = condition.get(EXACT, None)
img = condition.get(IMAGE, None)
if new_state != self._state or img != self._entity_picture:
self._state = new_state
self._entity_picture = img
return True
return False
try:
self._state = data.get(FORECAST)[fcday].get(self.type[:-3])
return True
except IndexError:
_LOGGER.warning("No forecast for fcday=%s...", fcday)
return False
if self.type == SYMBOL or self.type.startswith(CONDITION):
# update weather symbol & status text
condition = data.get(CONDITION, None)
if condition:
if self.type == SYMBOL:
new_state = condition.get(EXACTNL, None)
if self.type == CONDITION:
new_state = condition.get(CONDITION, None)
if self.type == 'conditioncode':
new_state = condition.get(CONDCODE, None)
if self.type == 'conditiondetailed':
new_state = condition.get(DETAILED, None)
if self.type == 'conditionexact':
new_state = condition.get(EXACT, None)
img = condition.get(IMAGE, None)
if new_state != self._state or img != self._entity_picture:
self._state = new_state
self._entity_picture = img
return True
return False
if self.type.startswith(PRECIPITATION_FORECAST):
# update nested precipitation forecast sensors
nested = data.get(PRECIPITATION_FORECAST)
self._timeframe = nested.get(TIMEFRAME)
self._state = nested.get(self.type[len(PRECIPITATION_FORECAST)+1:])
return True
# update all other sensors
self._state = data.get(self.type)
return True
@property
def attribution(self):
"""Return the attribution."""
return self._attribution
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def entity_picture(self):
"""Weather symbol if type is symbol."""
return self._entity_picture
@property
def device_state_attributes(self):
"""Return the state attributes."""
from buienradar.buienradar import (PRECIPITATION_FORECAST)
if self.type.startswith(PRECIPITATION_FORECAST):
result = {ATTR_ATTRIBUTION: self._attribution}
if self._timeframe is not None:
result[TIMEFRAME_LABEL] = "%d min" % (self._timeframe)
return result
result = {
ATTR_ATTRIBUTION: self._attribution,
SENSOR_TYPES['stationname'][0]: self._stationname,
}
if self._measured is not None:
# convert datetime (Europe/Amsterdam) into local datetime
local_dt = dt_util.as_local(self._measured)
result[MEASURED_LABEL] = local_dt.strftime("%c")
return result
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return possible sensor specific icon."""
return SENSOR_TYPES[self.type][2]
@property
def force_update(self):
"""Return true for continuous sensors, false for discrete sensors."""
return self._force_update
class BrData:
"""Get the latest data and updates the states."""
def __init__(self, hass, coordinates, timeframe, devices):
"""Initialize the data object."""
self.devices = devices
self.data = {}
self.hass = hass
self.coordinates = coordinates
self.timeframe = timeframe
async def update_devices(self):
"""Update all devices/sensors."""
if self.devices:
tasks = []
# Update all devices
for dev in self.devices:
if dev.load_data(self.data):
tasks.append(dev.async_update_ha_state())
if tasks:
await asyncio.wait(tasks, loop=self.hass.loop)
async def schedule_update(self, minute=1):
"""Schedule an update after minute minutes."""
_LOGGER.debug("Scheduling next update in %s minutes.", minute)
nxt = dt_util.utcnow() + timedelta(minutes=minute)
async_track_point_in_utc_time(self.hass, self.async_update,
nxt)
async def get_data(self, url):
"""Load data from specified url."""
from buienradar.buienradar import (CONTENT,
MESSAGE, STATUS_CODE, SUCCESS)
_LOGGER.debug("Calling url: %s...", url)
result = {SUCCESS: False, MESSAGE: None}
resp = None
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(10, loop=self.hass.loop):
resp = await websession.get(url)
result[STATUS_CODE] = resp.status
result[CONTENT] = await resp.text()
if resp.status == 200:
result[SUCCESS] = True
else:
result[MESSAGE] = "Got http statuscode: %d" % (resp.status)
return result
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
result[MESSAGE] = "%s" % err
return result
finally:
if resp is not None:
await resp.release()
async def async_update(self, *_):
"""Update the data from buienradar."""
from buienradar.buienradar import (parse_data, CONTENT,
DATA, MESSAGE, STATUS_CODE, SUCCESS)
content = await self.get_data('http://xml.buienradar.nl')
if not content.get(SUCCESS, False):
content = await self.get_data('http://api.buienradar.nl')
if content.get(SUCCESS) is not True:
# unable to get the data
_LOGGER.warning("Unable to retrieve xml data from Buienradar."
"(Msg: %s, status: %s,)",
content.get(MESSAGE),
content.get(STATUS_CODE),)
# schedule new call
await self.schedule_update(SCHEDULE_NOK)
return
# rounding coordinates prevents unnecessary redirects/calls
rainurl = 'http://gadgets.buienradar.nl/data/raintext/?lat={}&lon={}'
rainurl = rainurl.format(
round(self.coordinates[CONF_LATITUDE], 2),
round(self.coordinates[CONF_LONGITUDE], 2)
)
raincontent = await self.get_data(rainurl)
if raincontent.get(SUCCESS) is not True:
# unable to get the data
_LOGGER.warning("Unable to retrieve raindata from Buienradar."
"(Msg: %s, status: %s,)",
raincontent.get(MESSAGE),
raincontent.get(STATUS_CODE),)
# schedule new call
await self.schedule_update(SCHEDULE_NOK)
return
result = parse_data(content.get(CONTENT),
raincontent.get(CONTENT),
self.coordinates[CONF_LATITUDE],
self.coordinates[CONF_LONGITUDE],
self.timeframe)
_LOGGER.debug("Buienradar parsed data: %s", result)
if result.get(SUCCESS) is not True:
if int(datetime.now().strftime('%H')) > 0:
_LOGGER.warning("Unable to parse data from Buienradar."
"(Msg: %s)",
result.get(MESSAGE),)
await self.schedule_update(SCHEDULE_NOK)
return
self.data = result.get(DATA)
await self.update_devices()
await self.schedule_update(SCHEDULE_OK)
@property
def attribution(self):
"""Return the attribution."""
from buienradar.buienradar import ATTRIBUTION
return self.data.get(ATTRIBUTION)
@property
def stationname(self):
"""Return the name of the selected weatherstation."""
from buienradar.buienradar import STATIONNAME
return self.data.get(STATIONNAME)
@property
def condition(self):
"""Return the condition."""
from buienradar.buienradar import CONDITION
return self.data.get(CONDITION)
@property
def temperature(self):
"""Return the temperature, or None."""
from buienradar.buienradar import TEMPERATURE
try:
return float(self.data.get(TEMPERATURE))
except (ValueError, TypeError):
return None
@property
def pressure(self):
"""Return the pressure, or None."""
from buienradar.buienradar import PRESSURE
try:
return float(self.data.get(PRESSURE))
except (ValueError, TypeError):
return None
@property
def humidity(self):
"""Return the humidity, or None."""
from buienradar.buienradar import HUMIDITY
try:
return int(self.data.get(HUMIDITY))
except (ValueError, TypeError):
return None
@property
def visibility(self):
"""Return the visibility, or None."""
from buienradar.buienradar import VISIBILITY
try:
return int(self.data.get(VISIBILITY))
except (ValueError, TypeError):
return None
@property
def wind_speed(self):
"""Return the windspeed, or None."""
from buienradar.buienradar import WINDSPEED
try:
return float(self.data.get(WINDSPEED))
except (ValueError, TypeError):
return None
@property
def wind_bearing(self):
"""Return the wind bearing, or None."""
from buienradar.buienradar import WINDAZIMUTH
try:
return int(self.data.get(WINDAZIMUTH))
except (ValueError, TypeError):
return None
@property
def forecast(self):
"""Return the forecast data."""
from buienradar.buienradar import FORECAST
return self.data.get(FORECAST)
|
apache-2.0
|
guo-yu/pynotes
|
examples/def.py
|
2
|
1046
|
# -*- coding: UTF-8 -*-
#!/usr/bin/python
# Filename : def.py
# Define a function with the def keyword
# What is a function?
# A function is a bundle of code fragments
# You can give a function any name you like, and then call it by that name
# Let's try wrapping hello world in a function and using it
def helloworld(text="hello world!"):
print text
# Now we have a function. Functions take parameters, declared inside (); in Python
# you can give a parameter a default value with a=b, or leave the default out, like this:
def helloword_without_default_param(text):
print text
# As you can see, I can define a function under any name I want, even one with underscores
# Now, how do we call them?
helloworld() # => 'hello world!'
helloworld('good night!') # => 'good night!'
helloword_without_default_param(text='hello again!') # => 'hello again!'
# Now you know how to define functions and how to call them; try writing some yourself.
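# A small extra sketch (not part of the original notes): a function can also
# return a value instead of printing it.
def add(a, b=1):
    return a + b
print add(2, 3) # => 5
print add(2) # => 3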
|
mit
|
vmthunder/nova
|
nova/tests/objects/test_dns_domain.py
|
16
|
2997
|
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import dns_domain
from nova.tests.objects import test_objects
fake_dnsd = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'domain': 'blah.example.com',
'scope': 'private',
'availability_zone': 'overthere',
'project_id': '867530niner',
}
class _TestDNSDomain(object):
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(db[field], obj[field])
def test_get_by_domain(self):
with mock.patch.object(db, 'dnsdomain_get') as get:
get.return_value = fake_dnsd
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self._compare(self, fake_dnsd, dnsd)
def test_register_for_zone(self):
dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
'domain', 'zone')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('zone', dnsd.availability_zone)
def test_register_for_project(self):
dns_domain.DNSDomain.register_for_project(self.context.elevated(),
'domain', 'project')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('project', dnsd.project_id)
def test_delete_by_domain(self):
dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
'domain', 'zone')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('zone', dnsd.availability_zone)
dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
'domain')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertIsNone(dnsd)
def test_get_all(self):
with mock.patch.object(db, 'dnsdomain_get_all') as get:
get.return_value = [fake_dnsd]
dns_domain.DNSDomainList.get_all(self.context)
class TestDNSDomainObject(test_objects._LocalTest,
_TestDNSDomain):
pass
class TestRemoteDNSDomainObject(test_objects._RemoteTest,
_TestDNSDomain):
pass
|
apache-2.0
|
jinxuan/googletest
|
test/gtest_catch_exceptions_test.py
|
2139
|
9901
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
sander76/home-assistant
|
homeassistant/components/mystrom/switch.py
|
15
|
2895
|
"""Support for myStrom switches/plugs."""
import logging
from pymystrom.exceptions import MyStromConnectionError
from pymystrom.switch import MyStromSwitch as _MyStromSwitch
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "myStrom Switch"
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
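# Example configuration.yaml entry (illustrative sketch: only the keys come
# from the schema above, the host and name values are assumptions):
#
# switch:
#   - platform: mystrom
#     host: 192.168.1.50
#     name: Office plug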
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the myStrom switch/plug integration."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
try:
plug = _MyStromSwitch(host)
await plug.get_state()
except MyStromConnectionError as err:
_LOGGER.error("No route to myStrom plug: %s", host)
raise PlatformNotReady() from err
async_add_entities([MyStromSwitch(plug, name)])
class MyStromSwitch(SwitchEntity):
"""Representation of a myStrom switch/plug."""
def __init__(self, plug, name):
"""Initialize the myStrom switch/plug."""
self._name = name
self.plug = plug
self._available = True
self.relay = None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return bool(self.relay)
@property
def unique_id(self):
"""Return a unique ID."""
return self.plug._mac # pylint: disable=protected-access
@property
def current_power_w(self):
"""Return the current power consumption in W."""
return self.plug.consumption
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
try:
await self.plug.turn_on()
except MyStromConnectionError:
_LOGGER.error("No route to myStrom plug")
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
try:
await self.plug.turn_off()
except MyStromConnectionError:
_LOGGER.error("No route to myStrom plug")
async def async_update(self):
"""Get the latest data from the device and update the data."""
try:
await self.plug.get_state()
self.relay = self.plug.relay
self._available = True
except MyStromConnectionError:
self._available = False
_LOGGER.error("No route to myStrom plug")
|
apache-2.0
|
lmaycotte/quark
|
quark/tests/test_nvp_driver.py
|
2
|
56364
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import aiclib
import contextlib
import mock
import neutron.extensions.securitygroup as sg_ext
from oslo_config import cfg
import quark.drivers.nvp_driver
from quark import exceptions as q_exc
from quark.tests import test_base
class TestNVPDriver(test_base.TestBase):
def setUp(self):
super(TestNVPDriver, self).setUp()
cfg.CONF.set_override('environment_capabilities', [], 'QUARK')
if not hasattr(self, 'driver'):
self.driver = quark.drivers.nvp_driver.NVPDriver()
cfg.CONF.clear_override('environment_capabilities', 'QUARK')
cfg.CONF.set_override('max_rules_per_group', 3, 'NVP')
cfg.CONF.set_override('max_rules_per_port', 1, 'NVP')
self.driver.max_ports_per_switch = 0
self.lswitch_uuid = "12345678-1234-1234-1234-123456781234"
self.context.tenant_id = "tid"
self.lport_uuid = "12345678-0000-0000-0000-123456781234"
self.net_id = "12345678-1234-1234-1234-123412341234"
self.port_id = "12345678-0000-0000-0000-123412341234"
self.profile_id = "12345678-0000-0000-0000-000000000000"
self.d_pkg = "quark.drivers.nvp_driver.NVPDriver"
self.max_spanning = 3
self.driver.limits.update({'max_rules_per_group': 3,
'max_rules_per_port': 2})
def _create_connection(self, switch_count=1,
has_switches=False, maxed_ports=False):
connection = mock.Mock()
lswitch = self._create_lswitch(has_switches, maxed_ports=maxed_ports)
lswitchport = self._create_lswitch_port(self.lswitch_uuid,
switch_count)
connection.lswitch_port = mock.Mock(return_value=lswitchport)
connection.lswitch = mock.Mock(return_value=lswitch)
return connection
def _create_lswitch_port(self, switch_uuid, switch_count):
port = mock.Mock()
port.create = mock.Mock(return_value={'uuid': self.lport_uuid})
port_query = self._create_lport_query(switch_count)
port.query = mock.Mock(return_value=port_query)
port.delete = mock.Mock(return_value=None)
port.attachment_vif = mock.Mock()
return port
def _create_lport_query(self, switch_count, profiles=[]):
query = mock.Mock()
port_list = {"_relations":
{"LogicalSwitchConfig":
{"uuid": self.lswitch_uuid,
"security_profiles": profiles}}}
port_query = {"results": [port_list], "result_count": switch_count}
query.results = mock.Mock(return_value=port_query)
query.security_profile_uuid().results.return_value = {
"results": [{"security_profiles": profiles}]}
return query
def _create_lswitch(self, switches_available, maxed_ports):
lswitch = mock.Mock()
lswitch.query = mock.Mock(
return_value=self.
_create_lswitch_query(switches_available, maxed_ports))
lswitch.create = mock.Mock(return_value={'uuid': self.lswitch_uuid})
lswitch.delete = mock.Mock(return_value=None)
return lswitch
def _create_lswitch_query(self, switches_available, maxed_ports):
query = mock.Mock()
port_count = 0
if maxed_ports:
port_count = self.max_spanning
lswitch_list = [{'uuid': 'abcd',
'_relations': {
'LogicalSwitchStatus': {
'lport_count': port_count}}}]
if not switches_available:
lswitch_list = []
lswitch_query = {"results": lswitch_list}
query.relations = mock.Mock(return_value=None)
query.results = mock.Mock(return_value=lswitch_query)
return query
def _create_security_profile(self):
profile = mock.Mock()
query = mock.Mock()
group = {'name': 'foo', 'uuid': self.profile_id,
'logical_port_ingress_rules': [],
'logical_port_egress_rules': []}
query.results = mock.Mock(return_value={'results': [group],
'result_count': 1})
profile.query = mock.Mock(return_value=query)
profile.read = mock.Mock(return_value=group)
return mock.Mock(return_value=profile)
def _create_security_rule(self, rule={}):
return lambda *x, **y: dict(y, ethertype=x[0])
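# Editorial note (not part of the original test module): the
# _create_security_rule helper above stands in for aiclib's security-rule
# factory. A hypothetical call such as
#     connection.securityrule('IPv4', protocol=6, port_range_min=80)
# would therefore return
#     {'ethertype': 'IPv4', 'protocol': 6, 'port_range_min': 80}
# which is the dict shape the security-group tests below compare against.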
class TestNVPDriverCreateNetwork(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
) as (conn,):
connection = self._create_connection()
conn.return_value = connection
yield connection
def test_create_network(self):
with self._stubs() as (connection):
self.driver.create_network(self.context, "test")
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch().transport_zone.called)
def test_create_network_name_longer_than_40_chars_gets_trimmed(self):
with self._stubs() as (connection):
long_n = 'A' * 50
self.driver.create_network(self.context, long_n)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch().transport_zone.called)
connection.lswitch().display_name.assert_called_with(long_n[:40])
class TestNVPDriverDefaultTransportZoneBindings(TestNVPDriver):
def setUp(self):
super(TestNVPDriverDefaultTransportZoneBindings, self).setUp()
cfg.CONF.set_override(
'additional_default_tz_types', ['vxlan'], 'NVP')
cfg.CONF.set_override(
'default_tz', 'tz_uuid', 'NVP')
cfg.CONF.set_override(
'default_tz_type', 'stt', 'NVP')
def tearDown(self):
super(TestNVPDriverDefaultTransportZoneBindings, self).tearDown()
cfg.CONF.clear_override('additional_default_tz_types', 'NVP')
cfg.CONF.clear_override('default_tz', 'NVP')
cfg.CONF.clear_override('default_tz_type', 'NVP')
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("quark.drivers.nvp_driver.SA_REGISTRY."
"get_strategy"),
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._lswitches_for_network" % self.d_pkg),
) as (sa_get_strategy, conn, switch_list):
connection = self._create_connection()
conn.return_value = connection
ret = {"results": [{"uuid": self.lswitch_uuid}]}
switch_list().results = mock.Mock(return_value=ret)
sa_strategy = mock.Mock()
sa_get_strategy.return_value = sa_strategy
sa_strategy.allocate.return_value = {"id": 123}
yield sa_get_strategy, sa_strategy, connection
def test_default_tz_bindings_net_create(self):
with self._stubs() as (sa_get_strategy, sa_strategy, connection):
self.driver.create_network(
self.context, "test", network_id="network_id")
self.assertTrue(connection.lswitch().create.called)
# assert vxlan tz manager was called
sa_strategy.allocate.assert_called_once_with(
self.context, 'tz_uuid', 'network_id')
# assert transport_zone was called:
# once for the default configured tz type (stt)
# once for the additional default tz type (vxlan)
self.assertEqual(
connection.lswitch().transport_zone.call_args_list,
[mock.call('tz_uuid', 'stt'),
mock.call('tz_uuid', 'vxlan', vxlan_id=123)]
)
def test_default_tz_bindings_net_delete(self):
with self._stubs() as (sa_get_strategy, sa_strategy, connection):
self.driver.delete_network(self.context, "network_id")
self.assertTrue(connection.lswitch().delete.called)
sa_strategy.deallocate.assert_called_once_with(
self.context, 'tz_uuid', 'network_id')
class TestNVPDriverProviderNetwork(TestNVPDriver):
"""Testing all of the network types is unnecessary, but a nice have."""
@contextlib.contextmanager
def _stubs(self, tz):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
) as (conn,):
connection = self._create_connection()
switch = self._create_lswitch(1, False)
switch.transport_zone = mock.Mock()
tz_results = mock.Mock()
tz_results.results = mock.Mock(return_value=tz)
tz_query = mock.Mock()
tz_query.query = mock.Mock(return_value=tz_results)
connection.transportzone = mock.Mock(return_value=tz_query)
conn.return_value = connection
yield connection, switch
def test_config_provider_attrs_flat_net(self):
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="flat", segment_id=None)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="bridge", vlan_id=None)
def test_config_provider_attrs_vlan_net(self):
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="vlan", segment_id=10)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="bridge", vlan_id=10)
def test_config_provider_attrs_gre_net(self):
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="gre", segment_id=None)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="gre", vlan_id=None)
def test_config_provider_attrs_stt_net(self):
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="stt", segment_id=None)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="stt", vlan_id=None)
def test_config_provider_attrs_local_net(self):
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="local", segment_id=None)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="local", vlan_id=None)
def test_config_provider_attrs_bridge_net(self):
"""A specialized case for NVP
This exists because internal driver calls can also call this method,
and they may pass 'bridge' in as the type, since that's how it's known
to NVP.
"""
tz = dict(result_count=1)
with self._stubs(tz) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net="net_uuid",
net_type="bridge", segment_id=None)
switch.transport_zone.assert_called_with(
zone_uuid="net_uuid", transport_type="bridge", vlan_id=None)
def test_config_provider_attrs_no_phys_net_or_type(self):
with self._stubs({}) as (connection, switch):
self.driver._config_provider_attrs(
connection=connection, switch=switch, phys_net=None,
net_type=None, segment_id=None)
self.assertFalse(switch.transport_zone.called)
def test_config_provider_attrs_vlan_net_no_segment_id_fails(self):
with self._stubs({}) as (connection, switch):
self.assertRaises(
q_exc.SegmentIdRequired,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net="net_uuid", net_type="vlan",
segment_id=None)
def test_config_provider_attrs_non_vlan_net_with_segment_id_fails(self):
with self._stubs({}) as (connection, switch):
self.assertRaises(
q_exc.SegmentIdUnsupported,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net="net_uuid", net_type="flat",
segment_id=10)
def test_config_phys_net_no_phys_type_fails(self):
with self._stubs({}) as (connection, switch):
self.assertRaises(
q_exc.ProvidernetParamError,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net="net_uuid", net_type=None,
segment_id=None)
def test_config_no_phys_net_with_phys_type_fails(self):
with self._stubs({}) as (connection, switch):
self.assertRaises(
q_exc.ProvidernetParamError,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net=None, net_type="flat",
segment_id=None)
def test_config_physical_net_doesnt_exist_fails(self):
tz = dict(result_count=0)
with self._stubs(tz) as (connection, switch):
self.assertRaises(
q_exc.PhysicalNetworkNotFound,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net="net_uuid", net_type="flat",
segment_id=None)
def test_config_physical_net_bad_net_type_fails(self):
with self._stubs({}) as (connection, switch):
self.assertRaises(
q_exc.InvalidPhysicalNetworkType,
self.driver._config_provider_attrs, connection=connection,
switch=switch, phys_net="net_uuid", net_type="lol",
segment_id=None)
class TestNVPDriverDeleteNetwork(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, network_exists=True):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._lswitches_for_network" % self.d_pkg),
) as (conn, switch_list):
connection = self._create_connection()
conn.return_value = connection
if network_exists:
ret = {"results": [{"uuid": self.lswitch_uuid}]}
else:
ret = {"results": []}
switch_list().results = mock.Mock(return_value=ret)
yield connection
def test_delete_network(self):
with self._stubs() as (connection):
self.driver.delete_network(self.context, "test")
self.assertTrue(connection.lswitch().delete.called)
def test_delete_network_not_exists(self):
with self._stubs(network_exists=False) as (connection):
self.driver.delete_network(self.context, "test")
self.assertFalse(connection.lswitch().delete.called)
def test_delete_network_not_exists_404_exception(self):
with self._stubs(network_exists=True) as (connection):
self.driver.delete_network(self.context, "test")
self.assertTrue(connection.lswitch().delete.called)
class TestNVPDriverDeleteNetworkWithExceptions(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, network_exists=True, exception=None):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._lswitches_for_network" % self.d_pkg),
mock.patch("%s._lswitch_delete" % self.d_pkg),
) as (conn, switch_list, switch_delete):
connection = self._create_connection()
conn.return_value = connection
if network_exists:
ret = {"results": [{"uuid": self.lswitch_uuid}]}
else:
ret = {"results": []}
switch_list().results = mock.Mock(return_value=ret)
if exception:
switch_delete.side_effect = exception
yield connection
def test_delete_network_with_404_aicexception(self):
e = aiclib.core.AICException(404, 'foo')
with self._stubs(exception=e) as connection:
try:
with self.assertRaises(type(e)):
self.driver.delete_network(self.context, "test")
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertFalse(connection.lswitch().delete.called)
def test_delete_network_with_500_aicexception(self):
e = aiclib.core.AICException(500, 'foo')
with self._stubs(exception=e) as connection:
try:
with self.assertRaises(type(e)):
self.driver.delete_network(self.context, "test")
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertFalse(connection.lswitch().delete.called)
def test_delete_network_with_normal_exception(self):
e = StandardError('foo')
with self._stubs(exception=e) as connection:
try:
with self.assertRaises(type(e)):
self.driver.delete_network(self.context, "test")
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "StandardError not raised")
self.assertFalse(connection.lswitch().delete.called)
class TestNVPDriverCreatePort(TestNVPDriver):
'''In all cases an lswitch should be queried.'''
@contextlib.contextmanager
def _stubs(self, has_lswitch=True, maxed_ports=False, net_details=None):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
mock.patch("%s._lswitches_for_network" % self.d_pkg),
mock.patch("%s._get_network_details" % self.d_pkg),
) as (conn, next_conn, get_switches, get_net_dets):
connection = self._create_connection(has_switches=has_lswitch,
maxed_ports=maxed_ports)
conn.return_value = connection
get_switches.return_value = connection.lswitch().query()
get_net_dets.return_value = net_details
yield connection
def test_select_ipam_strategy(self):
strategy = self.driver.select_ipam_strategy(1, "ANY")
self.assertEqual(strategy, "ANY")
def test_create_port_switch_exists(self):
with self._stubs(net_details=dict(foo=3)) as (connection):
port = self.driver.create_port(self.context, self.net_id,
self.port_id)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch_port().attachment_vif.called)
self.assertFalse(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(True in status_args)
def test_create_port_switch_exists_tags(self):
with self._stubs(net_details=dict(foo=3)) as (connection):
device_id = "foo"
port = self.driver.create_port(self.context, self.net_id,
self.port_id, device_id=device_id)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch_port().attachment_vif.called)
self.assertFalse(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(True in status_args)
connection.lswitch_port().assert_has_calls([mock.call.tags([
dict(scope="neutron_net_id", tag=self.net_id),
dict(scope="neutron_port_id", tag=self.port_id),
dict(scope="os_tid", tag=self.context.tenant_id),
dict(scope="vm_id", tag=device_id)
])], any_order=True)
def test_create_port_switch_not_exists(self):
with self._stubs(has_lswitch=False,
net_details=dict(foo=3)) as (connection):
port = self.driver.create_port(self.context, self.net_id,
self.port_id)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(True in status_args)
def test_create_port_no_existing_switches_fails(self):
with self._stubs(has_lswitch=False):
self.assertRaises(q_exc.BadNVPState, self.driver.create_port,
self.context, self.net_id, self.port_id, False)
def test_create_disabled_port_switch_not_exists(self):
with self._stubs(has_lswitch=False,
net_details=dict(foo=3)) as (connection):
port = self.driver.create_port(self.context, self.net_id,
self.port_id, False)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(False in status_args)
def test_create_port_switch_exists_spanning(self):
with self._stubs(maxed_ports=True,
net_details=dict(foo=3)) as (connection):
self.driver.limits['max_ports_per_switch'] = self.max_spanning
port = self.driver.create_port(self.context, self.net_id,
self.port_id)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(True in status_args)
def test_create_port_switch_not_exists_spanning(self):
with self._stubs(has_lswitch=False, maxed_ports=True,
net_details=dict(foo=3)) as (connection):
self.driver.max_ports_per_switch = self.max_spanning
port = self.driver.create_port(self.context, self.net_id,
self.port_id)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(True in status_args)
def test_create_disabled_port_switch_not_exists_spanning(self):
with self._stubs(has_lswitch=False, maxed_ports=True,
net_details=dict(foo=3)) as (connection):
self.driver.max_ports_per_switch = self.max_spanning
port = self.driver.create_port(self.context, self.net_id,
self.port_id, False)
self.assertTrue("uuid" in port)
self.assertTrue(connection.lswitch().create.called)
self.assertTrue(connection.lswitch_port().create.called)
self.assertTrue(connection.lswitch().query.called)
status_args, kwargs = (
connection.lswitch_port().admin_status_enabled.call_args)
self.assertTrue(False in status_args)
def test_create_port_with_security_groups(self):
cfg.CONF.set_override('environment_capabilities', [], 'QUARK')
with self._stubs() as connection:
connection.securityprofile = self._create_security_profile()
self.driver.create_port(self.context, self.net_id,
self.port_id,
security_groups=[1])
connection.lswitch_port().assert_has_calls([
mock.call.security_profiles([self.profile_id]),
], any_order=True)
cfg.CONF.clear_override('environment_capabilities', 'QUARK')
def test_create_port_with_security_groups_max_rules(self):
cfg.CONF.set_override('environment_capabilities', [], 'QUARK')
with self._stubs() as connection:
connection.securityprofile = self._create_security_profile()
connection.securityprofile().read().update(
{'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
{'ethertype': 'IPv6'}],
'logical_port_egress_rules': [{'ethertype': 'IPv4'},
{'ethertype': 'IPv6'}]})
with self.assertRaises(sg_ext.nexception.InvalidInput):
self.driver.create_port(
self.context, self.net_id, self.port_id,
security_groups=[1])
cfg.CONF.clear_override('environment_capabilities', 'QUARK')
class TestNVPDriverUpdatePort(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
conn.return_value = connection
yield connection
def test_update_port(self):
cfg.CONF.set_override('environment_capabilities', [], 'QUARK')
with self._stubs() as connection:
self.driver.update_port(
self.context, self.port_id,
security_groups=[1])
connection.lswitch_port().assert_has_calls([
mock.call.security_profiles([self.profile_id]),
], any_order=True)
cfg.CONF.clear_override('environment_capabilities', 'QUARK')
def test_update_port_max_rules(self):
cfg.CONF.set_override('environment_capabilities', [], 'QUARK')
with self._stubs() as connection:
connection.securityprofile().read().update(
{'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
{'ethertype': 'IPv6'}],
'logical_port_egress_rules': [{'ethertype': 'IPv4'},
{'ethertype': 'IPv6'}]})
with self.assertRaises(sg_ext.nexception.InvalidInput):
self.driver.update_port(
self.context, self.port_id,
security_groups=[1])
cfg.CONF.clear_override('environment_capabilities', 'QUARK')
class TestNVPDriverLswitchesForNetwork(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, single_switch=True):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
) as (conn,):
connection = self._create_connection(
has_switches=True, switch_count=1)
conn.return_value = connection
yield connection
def test_get_lswitches(self):
"""Test exists for coverage. No decisions are made."""
with self._stubs() as connection:
query_mock = mock.Mock()
query_mock.tags = mock.Mock()
query_mock.tagscopes = mock.Mock()
connection.query = mock.Mock(return_value=query_mock)
self.driver._lswitches_for_network(self.context, "net_uuid")
def test_get_lswitch_ids_for_network(self):
with self._stubs() as connection:
query_mock = mock.Mock()
query_mock.tags = mock.Mock()
query_mock.tagscopes = mock.Mock()
connection.query = mock.Mock(return_value=query_mock)
lswitch_ids = self.driver.get_lswitch_ids_for_network(
self.context, "net_uuid")
self.assertEqual(lswitch_ids, ['abcd'])
class TestSwitchCopying(TestNVPDriver):
def test_no_existing_switches(self):
switches = dict(results=[])
args = self.driver._get_network_details(None, 1, switches)
self.assertTrue(args == {})
def test_has_switches_no_transport_zones(self):
switch = dict(display_name="public", transport_zones=[])
switches = dict(results=[switch])
args = self.driver._get_network_details(None, 1, switches)
self.assertEqual(args["network_name"], "public")
self.assertEqual(args["phys_net"], None)
def test_has_switches_and_transport_zones(self):
transport_zones = [dict(zone_uuid="zone_uuid",
transport_type="bridge")]
switch = dict(display_name="public", transport_zones=transport_zones)
switches = dict(results=[switch])
args = self.driver._get_network_details(None, 1, switches)
self.assertEqual(args["network_name"], "public")
self.assertEqual(args["phys_net"], "zone_uuid")
self.assertEqual(args["phys_type"], "bridge")
def test_has_switches_tz_and_vlan(self):
binding = dict(vlan_translation=[dict(transport=10)])
transport_zones = [dict(zone_uuid="zone_uuid",
transport_type="bridge",
binding_config=binding)]
switch = dict(display_name="public", transport_zones=transport_zones)
switches = dict(results=[switch])
args = self.driver._get_network_details(None, 1, switches)
self.assertEqual(args["network_name"], "public")
self.assertEqual(args["phys_net"], "zone_uuid")
self.assertEqual(args["phys_type"], "bridge")
class TestNVPDriverDeletePort(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, switch_count=1):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection(switch_count=switch_count)
conn.return_value = connection
yield connection
def test_delete_port(self):
with self._stubs() as (connection):
self.driver.delete_port(self.context, self.port_id)
self.assertTrue(connection.lswitch_port().delete.called)
def test_delete_port_switch_given(self):
with self._stubs() as (connection):
self.driver.delete_port(self.context, self.port_id,
lswitch_uuid=self.lswitch_uuid)
self.assertFalse(connection.lswitch_port().query.called)
self.assertTrue(connection.lswitch_port().delete.called)
def test_delete_port_many_switches(self):
with self._stubs(switch_count=2):
try:
with self.assertRaises(Exception): # noqa
self.driver.delete_port(self.context, self.port_id)
except AssertionError as ae:
self.assertEqual(ae.args[0], "Exception not raised")
def test_delete_port_no_switch_bad_data(self):
with self._stubs(switch_count=0):
try:
with self.assertRaises(Exception): # noqa
self.driver.delete_port(self.context, self.port_id)
except AssertionError as ae:
self.assertEqual(ae.args[0], "Exception not raised")
class TestNVPDriverDeletePortWithExceptions(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, switch_exception=None, delete_exception=None):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._lswitch_from_port" % self.d_pkg),
) as (conn, switch):
connection = self._create_connection()
conn.return_value = connection
if switch_exception:
switch.side_effect = switch_exception
else:
switch = mock.Mock(return_value=1)
if delete_exception:
connection.lswitch_port.delete.side_effect = delete_exception
yield connection
def test_delete_port_with_switch_query_general_exception(self):
e = Exception('foo')
with self._stubs(switch_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "Exception not raised")
self.assertFalse(connection.lswitch_port().delete.called)
def test_delete_port_with_switch_query_404_aic_exception(self):
e = aiclib.core.AICException(404, 'foo')
with self._stubs(switch_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertFalse(connection.lswitch_port().delete.called)
def test_delete_port_with_switch_query_500_aic_exception(self):
e = aiclib.core.AICException(500, 'foo')
with self._stubs(switch_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertFalse(connection.lswitch_port().delete.called)
def test_delete_port_with_delete_general_exception(self):
e = Exception('foo')
with self._stubs(delete_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "Exception not raised")
self.assertTrue(connection.lswitch_port().delete.called)
def test_delete_port_with_delete_404_aic_exception(self):
e = aiclib.core.AICException(404, 'foo')
with self._stubs(delete_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertTrue(connection.lswitch_port().delete.called)
def test_delete_port_with_delete_500_aic_exception(self):
e = aiclib.core.AICException(500, 'foo')
with self._stubs(delete_exception=e) as (connection):
try:
with self.assertRaises(type(e)):
self.driver.delete_port(self.context, 'test')
self.fail("AssertionError should have been raised.")
except AssertionError as ae:
self.assertEqual(ae.args[0], "AICException not raised")
self.assertTrue(connection.lswitch_port().delete.called)
class TestNVPDriverCreateSecurityGroup(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
conn.return_value = connection
yield connection
def test_security_group_create(self):
group = {'group_id': 1}
with self._stubs() as connection:
self.driver.create_security_group(
self.context, 'foo', **group)
connection.securityprofile().assert_has_calls([
mock.call.display_name('foo'),
mock.call.create(),
], any_order=True)
def test_security_group_create_with_rules(self):
ingress_rules = [{'ethertype': 'IPv4'}, {'ethertype': 'IPv4',
'protocol': 6}]
egress_rules = [{'ethertype': 'IPv6', 'protocol': 17}]
group = {'group_id': 1, 'port_ingress_rules': ingress_rules,
'port_egress_rules': egress_rules}
with self._stubs() as connection:
self.driver.create_security_group(
self.context, 'foo', **group)
connection.securityprofile().assert_has_calls([
mock.call.display_name('foo'),
mock.call.port_egress_rules(egress_rules),
mock.call.port_ingress_rules(ingress_rules),
mock.call.tags([{'scope': 'neutron_group_id', 'tag': 1},
{'scope': 'os_tid',
'tag': self.context.tenant_id}]),
], any_order=True)
def test_security_group_create_rules_at_max(self):
ingress_rules = [{'ethertype': 'IPv4', 'protocol': 6},
{'ethertype': 'IPv6',
'remote_ip_prefix': '192.168.0.1'}]
egress_rules = [{'ethertype': 'IPv4', 'protocol': 17,
'port_range_min': 0, 'port_range_max': 100},
{'ethertype': 'IPv4', 'remote_group_id': 2}]
with self._stubs():
with self.assertRaises(sg_ext.nexception.InvalidInput):
self.driver.create_security_group(
self.context, 'foo',
port_ingress_rules=ingress_rules,
port_egress_rules=egress_rules)
class TestNVPDriverDeleteSecurityGroup(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
conn.return_value = connection
yield connection
def test_security_group_delete(self):
with self._stubs() as connection:
self.driver.delete_security_group(self.context, 1)
connection.securityprofile().query().assert_has_calls([
mock.call.tagscopes(['os_tid', 'neutron_group_id']),
mock.call.tags([self.context.tenant_id, 1]),
], any_order=True)
connection.securityprofile.assert_any_call(self.profile_id)
self.assertTrue(connection.securityprofile().delete)
def test_security_group_delete_not_found(self):
with self._stubs() as connection:
connection.securityprofile().query().results.return_value = {
'result_count': 0, 'results': []}
with self.assertRaises(sg_ext.SecurityGroupNotFound):
self.driver.delete_security_group(self.context, 1)
class TestNVPDriverUpdateSecurityGroup(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
conn.return_value = connection
yield connection
def test_security_group_update(self):
with self._stubs() as connection:
self.driver.update_security_group(self.context, 1, name='bar')
connection.securityprofile.assert_any_call(self.profile_id)
connection.securityprofile().assert_has_calls([
mock.call.display_name('bar'),
mock.call.update()],
any_order=True)
def test_security_group_update_not_found(self):
with self._stubs() as connection:
connection.securityprofile().query().results.return_value = {
'result_count': 0, 'results': []}
with self.assertRaises(sg_ext.SecurityGroupNotFound):
self.driver.update_security_group(self.context, 1)
def test_security_group_update_with_rules(self):
ingress_rules = [{'ethertype': 'IPv4', 'protocol': 6},
{'ethertype': 'IPv6',
'remote_ip_prefix': '192.168.0.1'}]
egress_rules = [{'ethertype': 'IPv4', 'protocol': 17,
'port_range_min': 0, 'port_range_max': 100}]
with self._stubs() as connection:
self.driver.update_security_group(
self.context, 1,
port_ingress_rules=ingress_rules,
port_egress_rules=egress_rules)
connection.securityprofile.assert_any_call(self.profile_id)
connection.securityprofile().assert_has_calls([
mock.call.port_ingress_rules(ingress_rules),
mock.call.port_egress_rules(egress_rules),
mock.call.update(),
], any_order=True)
def test_security_group_update_rules_at_max(self):
ingress_rules = [{'ethertype': 'IPv4', 'protocol': 6},
{'ethertype': 'IPv6',
'remote_ip_prefix': '192.168.0.1'}]
egress_rules = [{'ethertype': 'IPv4', 'protocol': 17,
'port_range_min': 0, 'port_range_max': 100},
{'ethertype': 'IPv4', 'remote_group_id': 2}]
with self._stubs():
with self.assertRaises(sg_ext.nexception.InvalidInput):
self.driver.update_security_group(
self.context, 1,
port_ingress_rules=ingress_rules,
port_egress_rules=egress_rules)
class TestNVPDriverCreateSecurityGroupRule(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
mock.patch("%s._next_connection" % self.d_pkg),
) as (conn, next_conn):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
connection.securityrule = self._create_security_rule()
connection.lswitch_port().query.return_value = (
self._create_lport_query(1, [self.profile_id]))
conn.return_value = connection
yield connection
def test_security_rule_create(self):
with self._stubs() as connection:
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'ingress'})
connection.securityprofile.assert_any_call(self.profile_id)
connection.securityprofile().assert_has_calls([
mock.call.port_ingress_rules([{'ethertype': 'IPv4'}]),
mock.call.update(),
], any_order=True)
def test_security_rule_create_with_ip_prefix_and_profile(self):
with self._stubs() as connection:
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'ingress',
'remote_ip_prefix': "pre", "remote_group_id": "group",
"protocol": "udp"})
connection.securityprofile.assert_any_call(self.profile_id)
connection.securityprofile().assert_has_calls([
mock.call.port_ingress_rules([{'ethertype': 'IPv4',
"ip_prefix": "pre",
"profile_uuid": "group",
"protocol": "udp"}]),
mock.call.update(),
], any_order=True)
def test_security_rule_create_invalid_direction(self):
with self._stubs():
with self.assertRaises(AttributeError):
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'instantregret'})
def test_security_rule_create_duplicate(self):
with self._stubs() as connection:
connection.securityprofile().read().update({
'logical_port_ingress_rules': [{'ethertype': 'IPv4'}],
'logical_port_egress_rules': []})
with self.assertRaises(sg_ext.SecurityGroupRuleExists):
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'ingress'})
def test_security_rule_create_not_found(self):
with self._stubs() as connection:
connection.securityprofile().query().results.return_value = {
'result_count': 0, 'results': []}
with self.assertRaises(sg_ext.SecurityGroupNotFound):
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'egress'})
def test_security_rule_create_over_port(self):
with self._stubs() as connection:
connection.securityprofile().read().update(
{'logical_port_ingress_rules': [1, 2]})
with self.assertRaises(sg_ext.nexception.InvalidInput):
self.driver.create_security_group_rule(
self.context, 1,
{'ethertype': 'IPv4', 'direction': 'egress'})
self.assertTrue(connection.lswitch_port().query.called)
class TestNVPDriverDeleteSecurityGroupRule(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, rules=[]):
rulelist = {'logical_port_ingress_rules': [],
'logical_port_egress_rules': []}
for rule in rules:
rulelist['logical_port_%s_rules' % rule.pop('direction')].append(
rule)
with contextlib.nested(
mock.patch("%s._connection" % self.d_pkg),
) as (conn,):
connection = self._create_connection()
connection.securityprofile = self._create_security_profile()
connection.securityrule = self._create_security_rule()
connection.securityprofile().read().update(rulelist)
conn.return_value = connection
yield connection
def test_delete_security_group(self):
with self._stubs(
rules=[{'ethertype': 'IPv4', 'direction': 'ingress'},
{'ethertype': 'IPv6', 'direction': 'egress'}]
) as connection:
self.driver.delete_security_group_rule(
self.context, 1, {'ethertype': 'IPv6', 'direction': 'egress'})
connection.securityprofile.assert_any_call(self.profile_id)
connection.securityprofile().assert_has_calls([
mock.call.port_egress_rules([]),
mock.call.update(),
], any_order=True)
def test_delete_security_group_does_not_exist(self):
with self._stubs(rules=[{'ethertype': 'IPv4',
'direction': 'ingress'}]):
with self.assertRaises(sg_ext.SecurityGroupRuleNotFound):
self.driver.delete_security_group_rule(
self.context, 1,
{'ethertype': 'IPv6', 'direction': 'egress'})
class TestNVPDriverLoadConfig(TestNVPDriver):
def test_load_config(self):
controllers = "192.168.221.139:443:admin:admin:30:10:2:2"
cfg.CONF.set_override("controller_connection", [controllers], "NVP")
self.driver.load_config()
conn = self.driver.nvp_connections[0]
self.assertEqual(conn["username"], "admin")
self.assertEqual(conn["retries"], 2)
self.assertEqual(conn["redirects"], '2')
self.assertEqual(conn["http_timeout"], 10)
self.assertEqual(conn["req_timeout"], "30")
self.assertEqual(conn["default_tz"], None)
self.assertEqual(conn["password"], "admin")
self.assertEqual(conn["ip_address"], "192.168.221.139")
self.assertEqual(conn["port"], "443")
cfg.CONF.clear_override("controller_connection", "NVP")
def test_load_config_no_connections(self):
self.driver.load_config()
self.assertEqual(len(self.driver.nvp_connections), 0)
class TestNVPDriverLoadConfigRandomController(TestNVPDriver):
@mock.patch("random.randint")
def test_load_config(self, randint):
controllers = "192.168.221.139:443:admin:admin:30:10:2:2"
cfg.CONF.set_override("controller_connection", [controllers], "NVP")
cfg.CONF.set_override("random_initial_controller", True,
"NVP")
randint.return_value = 0
self.driver.load_config()
self.assertTrue(randint.called)
cfg.CONF.clear_override("controller_connection", "NVP")
cfg.CONF.clear_override("random_initial_controller", "NVP")
class TestNVPGetConnection(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, has_conn):
controllers = "192.168.221.139:443:admin:admin:30:10:2:2"
cfg.CONF.set_override("controller_connection", [controllers], "NVP")
if has_conn:
self.driver.nvp_connections.append(dict(connection="foo",
usages=0))
else:
self.driver.nvp_connections.append(dict(port="443",
ip_address="192.168.0.1",
username="admin",
password="admin",
http_timeout=10,
retries=1,
backoff=0,
usages=0))
with contextlib.nested(
mock.patch("aiclib.nvp.Connection"),
mock.patch("%s._next_connection" % self.d_pkg)
) as (aiclib_conn, next_conn):
yield aiclib_conn, next_conn
cfg.CONF.clear_override("controller_connection", "NVP")
def test_get_connection(self):
with self._stubs(has_conn=False) as (aiclib_conn, next_conn):
with self.driver.get_connection():
pass
self.assertTrue(aiclib_conn.called)
self.assertFalse(next_conn.called)
def test_get_connection_connection_defined(self):
with self._stubs(has_conn=True) as (aiclib_conn, next_conn):
with self.driver.get_connection():
pass
self.assertFalse(aiclib_conn.called)
self.assertFalse(next_conn.called)
def test_get_connection_iterates(self):
with self._stubs(has_conn=True) as (aiclib_conn, next_conn):
try:
with self.driver.get_connection():
raise Exception("Failure")
except Exception:
pass
self.assertFalse(aiclib_conn.called)
self.assertTrue(next_conn.called)
def test_get_connection_with_threshold(self):
cfg.CONF.set_override("connection_switching_threshold", 1, "NVP")
with self._stubs(has_conn=True) as (aiclib_conn, next_conn):
with self.driver.get_connection():
pass
self.assertFalse(aiclib_conn.called)
self.assertTrue(next_conn.called)
cfg.CONF.clear_override("connection_switching_threshold", "NVP")
def test_get_connection_with_threshold_next_conn_not_called(self):
cfg.CONF.set_override("connection_switching_threshold", 2, "NVP")
with self._stubs(has_conn=True) as (aiclib_conn, next_conn):
with self.driver.get_connection():
pass
self.assertFalse(aiclib_conn.called)
self.assertFalse(next_conn.called)
cfg.CONF.clear_override("connection_switching_threshold", "NVP")
class TestNVPGetConnectionNoneDefined(TestNVPDriver):
def test_get_connection(self):
with self.assertRaises(q_exc.NoBackendConnectionsDefined):
with self.driver.get_connection():
pass
class TestNVPNextConnection(TestNVPDriver):
@contextlib.contextmanager
def _stubs(self, rand=False):
controllers = "192.168.221.139:443:admin:admin:30:10:2:2"
cfg.CONF.set_override("controller_connection", [controllers], "NVP")
if rand:
cfg.CONF.set_override("connection_switching_random", True, "NVP")
conn1 = dict(port="443", ip_address="192.168.0.1", username="admin",
password="admin", http_timeout=10, retries=1, backoff=0,
usages=0)
conn2 = conn1.copy()
conn2["ip_address"] = "192.168.0.2"
self.driver.nvp_connections.extend([conn1, conn2])
with contextlib.nested(
mock.patch("random.randint")
) as (randint,):
randint.return_value = 1
yield randint
cfg.CONF.clear_override("controller_connection", "NVP")
if rand:
cfg.CONF.clear_override("connection_switching_random", "NVP")
def test_get_connection(self):
with self._stubs() as randint:
self.driver._next_connection()
self.assertEqual(1, self.driver.conn_index)
self.assertFalse(randint.called)
def test_get_connection_random(self):
with self._stubs(rand=True) as randint:
self.driver._next_connection()
self.assertEqual(1, self.driver.conn_index)
self.assertTrue(randint.called)
|
apache-2.0
|
joyaether/zxing
|
cpp/scons/scons-local-2.0.0.final.0/SCons/Memoize.py
|
34
|
9640
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Memoize.py 5023 2010/06/14 22:05:46 scons"
__doc__ = """Memoizer
A metaclass implementation to count hits and misses of the computed
values that various methods cache in memory.
Use of this module assumes that wrapped methods are coded to cache their
values in a consistent way. Here is an example of wrapping a method
that returns a computed value, with no input parameters:
memoizer_counters = [] # Memoization
memoizer_counters.append(SCons.Memoize.CountValue('foo')) # Memoization
def foo(self):
try: # Memoization
return self._memo['foo'] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_foo_value()
self._memo['foo'] = result # Memoization
return result
Here is an example of wrapping a method that will return different values
based on one or more input arguments:
def _bar_key(self, argument): # Memoization
return argument # Memoization
memoizer_counters.append(SCons.Memoize.CountDict('bar', _bar_key)) # Memoization
def bar(self, argument):
memo_key = argument # Memoization
try: # Memoization
memo_dict = self._memo['bar'] # Memoization
except KeyError: # Memoization
memo_dict = {} # Memoization
self._memo['bar'] = memo_dict # Memoization
else: # Memoization
try: # Memoization
return memo_dict[memo_key] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_bar_value(argument)
memo_dict[memo_key] = result # Memoization
return result
At one point we avoided replicating this sort of logic in all the methods
by putting it right into this module, but we've moved away from that at
present (see the "Historical Note" below).
Deciding what to cache is tricky, because different configurations
can have radically different performance tradeoffs, and because the
tradeoffs involved are often so non-obvious. Consequently, deciding
whether or not to cache a given method will likely be more of an art than
a science, but should still be based on available data from this module.
Here are some VERY GENERAL guidelines about deciding whether or not to
cache return values from a method that's being called a lot:
-- The first question to ask is, "Can we change the calling code
so this method isn't called so often?" Sometimes this can be
done by changing the algorithm. Sometimes the *caller* should
be memoized, not the method you're looking at.
-- The memoized function should be timed with multiple configurations
to make sure it doesn't inadvertently slow down some other
configuration.
-- When memoizing values based on a dictionary key composed of
input arguments, you don't need to use all of the arguments
if some of them don't affect the return values.
Historical Note: The initial Memoizer implementation actually handled
the caching of values for the wrapped methods, based on a set of generic
algorithms for computing hashable values based on the method's arguments.
This collected caching logic nicely, but had two drawbacks:
Running arguments through a generic key-conversion mechanism is slower
(and less flexible) than just coding these things directly. Since the
methods that need memoized values are generally performance-critical,
slowing them down in order to collect the logic isn't the right
tradeoff.
Use of the memoizer really obscured what was being called, because
all the memoized methods were wrapped with re-used generic methods.
This made it more difficult, for example, to use the Python profiler
to figure out how to optimize the underlying methods.
"""
import types
# A flag controlling whether or not we actually use memoization.
use_memoizer = None
CounterList = []
class Counter(object):
"""
Base class for counting memoization hits and misses.
We expect that the metaclass initialization will have filled in
the .name attribute that represents the name of the function
being counted.
"""
def __init__(self, method_name):
"""
"""
self.method_name = method_name
self.hit = 0
self.miss = 0
CounterList.append(self)
def display(self):
fmt = " %7d hits %7d misses %s()"
print fmt % (self.hit, self.miss, self.name)
def __cmp__(self, other):
try:
return cmp(self.name, other.name)
except AttributeError:
return 0
class CountValue(Counter):
"""
A counter class for simple, atomic memoized values.
A CountValue object should be instantiated in a class for each of
the class's methods that memoizes its return value by simply storing
the return value in its _memo dictionary.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether
its memoized value has already been set (a hit) or not (a miss).
"""
def __call__(self, *args, **kw):
obj = args[0]
if self.method_name in obj._memo:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class CountDict(Counter):
"""
A counter class for memoized values stored in a dictionary, with
keys based on the method's input arguments.
A CountDict object is instantiated in a class for each of the
class's methods that memoizes its return value in a dictionary,
indexed by some key that can be computed from one or more of
its input arguments.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether the
computed key value is already present in the memoization dictionary
(a hit) or not (a miss).
"""
def __init__(self, method_name, keymaker):
"""
"""
Counter.__init__(self, method_name)
self.keymaker = keymaker
def __call__(self, *args, **kw):
obj = args[0]
try:
memo_dict = obj._memo[self.method_name]
except KeyError:
self.miss = self.miss + 1
else:
key = self.keymaker(*args, **kw)
if key in memo_dict:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class Memoizer(object):
"""Object which performs caching of method calls for its 'primary'
instance."""
def __init__(self):
pass
def Dump(title=None):
if title:
print title
CounterList.sort()
for counter in CounterList:
counter.display()
class Memoized_Metaclass(type):
def __init__(cls, name, bases, cls_dict):
super(Memoized_Metaclass, cls).__init__(name, bases, cls_dict)
for counter in cls_dict.get('memoizer_counters', []):
method_name = counter.method_name
counter.name = cls.__name__ + '.' + method_name
counter.underlying_method = cls_dict[method_name]
replacement_method = types.MethodType(counter, None, cls)
setattr(cls, method_name, replacement_method)
def EnableMemoization():
global use_memoizer
use_memoizer = 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
slickqa/slickqaweb
|
slickqaweb/api/files.py
|
1
|
7168
|
__author__ = 'jcorbett'
from slickqaweb.app import app
from flask import request, Response
from bson import ObjectId
from slickqaweb.model.storedFile import StoredFile
from slickqaweb.model.fileChunk import FileChunk
from slickqaweb.model.serialize import deserialize_that
from .standardResponses import JsonResponse, read_request
from hashlib import md5
import re
import logging
from .apidocs import add_resource, accepts, returns, argument_doc, note
from mongoengine import ListField, EmbeddedDocumentField, ReferenceField, BinaryField
add_resource('/files', 'Upload, or Download files on slick.')
@app.route("/api/files/<file_id>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@returns(StoredFile)
def get_stored_file(file_id):
"""Get the "stored file" or the summary about the file."""
return JsonResponse(StoredFile.objects(id=ObjectId(file_id)).first())
@app.route("/api/files/<file_id>", methods=["PUT"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(StoredFile)
@returns(StoredFile)
def update_stored_file(file_id):
"""Update the properties of a stored file, you only have to include changed properties"""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
stored_file = deserialize_that(read_request(), stored_file)
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files", methods=["POST"])
@accepts(StoredFile)
@returns(StoredFile)
@note("The chunkSize will be set by the server for you, even if you provide it. Make sure you supply a valid mimetype.")
def create_stored_file():
"""Create a new StoredFile object to store file content for."""
new_stored_file = deserialize_that(read_request(), StoredFile())
new_stored_file.chunkSize = 262144
new_stored_file.save()
return JsonResponse(new_stored_file)
@app.route("/api/files/<file_id>/content", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of file"))
@returns(StoredFile)
@note("Use is not recommended unless your file is really small. Instead add individual chunks to the file.")
def set_file_content(file_id):
"""Upload all the content at once (for small files)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
data = request.data
stored_file.md5 = md5(data).hexdigest()
stored_file.length = len(data)
num_of_chunks = len(data) / 262144
if (len(data) % 262144) > 0:
num_of_chunks += 1
for i in range(num_of_chunks):
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = i
chunk.data = data[i * 262144:(i + 1) * 262144]
chunk.save()
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files/<file_id>/addchunk", methods=["POST"])
@app.route("/api/results/<result_id>/files/<file_id>/addchunk", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of the chunk."))
@returns(StoredFile)
def add_chunk_to_file(file_id, result_id=None):
"""Add content to a file (chunk by chunk)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
num_of_chunks = len(FileChunk.objects(files_id=stored_file.id))
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = num_of_chunks
chunk.data = request.data
chunk.save()
stored_file.length += len(request.data)
stored_file.save()
return JsonResponse(stored_file)
#@app.route("/api/files/<file_id>/content/<filename>", methods=["HEAD"])
#def get_header_for_file(file_id, filename):
# logger = logging.getLogger('slickqaweb.api.files.get_header_for_file')
# stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
# if stored_file is None:
# return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
# logger.debug("Returning header information for file with id {} and name {}".format(file_id, filename))
@app.route("/api/files/<file_id>/content/<filename>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@argument_doc('filename', 'The filename of the stored file. This is actually ignored, but makes for nice looking URLs.')
@returns(BinaryField(help_text="The file content."))
@note("This sets the http header to the mimetype from the stored file, and streams the file to the requester.")
def get_file_content(file_id, filename):
"""Get the content of a file."""
logger = logging.getLogger('slickqaweb.api.files.get_file_content')
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
if stored_file is None:
return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
range_header = request.headers.get('Range', None)
response = None
if not range_header:
logger.info("Returning file in classic mode")
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
yield chunk.data
response = Response(write_chunks(), mimetype=stored_file.mimetype, direct_passthrough=True)
else:
logger.debug("Returning file with id {} and filename {} and md5sum {} in ranged mode.".format(file_id, filename, stored_file.md5))
byte1, byte2 = 0, (stored_file.length - 1)
m = re.search('(\d+)-(\d*)', range_header)
g = m.groups()
if g[0]:
byte1 = int(g[0])
if g[1]:
possible_byte2 = int(g[1])
if possible_byte2 < byte2:
byte2 = possible_byte2
data = []
start_chunk_number = byte1 / stored_file.chunkSize
end_chunk_number = byte2 / stored_file.chunkSize
if byte2 % stored_file.chunkSize > 0:
end_chunk_number += 1
start_index = byte1 % stored_file.chunkSize
end_index = byte2 % stored_file.chunkSize
logger.debug("Using range information {}-{}/{}, chunks {}:{}-{}:{}".format(byte1, byte2, stored_file.length - 1, start_chunk_number, start_index, end_chunk_number, end_index))
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
if chunk.n >= start_chunk_number and chunk.n <= end_chunk_number:
start = 0
end = stored_file.chunkSize
if chunk.n == start_chunk_number:
start = start_index
if chunk.n == end_chunk_number:
end = end_index
yield chunk.data[start:end]
response = Response(write_chunks(), 206, mimetype=stored_file.mimetype, direct_passthrough=True)
response.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(byte1, byte2, stored_file.length))
response.headers.add('Accept-Ranges', 'bytes')
return response
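# Illustrative client sketch (an editorial addition, not part of this module):
# one way to exercise the endpoints above using the 'requests' library. The
# base URL, the 'id' field name in the returned JSON, and the fields sent when
# creating the StoredFile are assumptions for illustration only.
#
#     import requests
#
#     base = 'http://localhost:5000/api'
#     meta = requests.post(base + '/files',
#                          json={'mimetype': 'text/plain'}).json()
#     with open('example.txt', 'rb') as f:
#         while True:
#             chunk = f.read(262144)  # matches the server-side chunkSize
#             if not chunk:
#                 break
#             requests.post(base + '/files/%s/addchunk' % meta['id'],
#                           data=chunk)
#
#     # Ranged download of the first 100 bytes, served by get_file_content().
#     r = requests.get(base + '/files/%s/content/example.txt' % meta['id'],
#                      headers={'Range': 'bytes=0-99'})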
|
apache-2.0
|
tomduijf/home-assistant
|
homeassistant/components/thermostat/heat_control.py
|
2
|
7241
|
"""
homeassistant.components.thermostat.heat_control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds support for a thermostat.
Specify a start time, end time and a target temperature.
If the current temperature is lower than the target temperature,
and the time is between start time and end time, the heater will
be turned on. Conversely, if the temperature is higher than the
target temperature, the heater will be turned off.
If away mode is activated, the target temperature is set to a minimum
temperature (min_temp in config). The min temperature is also used
as the target temperature when no other temperature is specified.
If the heater is manually turned on, the target temperature will
be set to 100*C, meaning the thermostat will probably never turn
off the heater.
If the heater is manually turned off, the target temperature will
be set according to the normal rules (based on the target temperature
for given time intervals and the min temperature).
A target temperature set with the set_temperature function will
override all other rules for the target temperature.
Config:
[thermostat]
platform=heat_control
name = Name of thermostat
heater = entity_id for heater switch,
must be a toggle device
target_sensor = entity_id for temperature sensor,
target_sensor.state must be temperature
time_temp = start_time-end_time:target_temp,
min_temp = minimum temperature, used when away mode is
active or no other temperature specified.
Example:
[thermostat]
platform=heat_control
name = Stue
heater = switch.Ovn_stue
target_sensor = tellstick_sensor.Stue_temperature
time_temp = 0700-0745:17,1500-1850:20
min_temp = 10
In the example the heater will turn on at 0700 if the temperature
is lower than 17*C and away mode is false. Between 0700 and 0745 the
target temperature will be 17*C. Between 0745 and 1500 no temperature
is specified, so the min_temp of 10*C will be used. From 1500 to 1850
the target temperature is 20*C, but if away mode is true the target
temperature will be set to 10*C.
"""
import logging
import datetime
import homeassistant.components as core
import homeassistant.util as util
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.helpers.event import track_state_change
from homeassistant.const import TEMP_CELCIUS, STATE_ON, STATE_OFF
TOL_TEMP = 0.3
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the heat control thermostat. """
logger = logging.getLogger(__name__)
add_devices([HeatControl(hass, config, logger)])
# pylint: disable=too-many-instance-attributes
class HeatControl(ThermostatDevice):
""" Represents a HeatControl device. """
def __init__(self, hass, config, logger):
self.logger = logger
self.hass = hass
self.heater_entity_id = config.get("heater")
self.name_device = config.get("name")
self.target_sensor_entity_id = config.get("target_sensor")
self.time_temp = []
if config.get("time_temp"):
for time_temp in list(config.get("time_temp").split(",")):
time, temp = time_temp.split(':')
time_start, time_end = time.split('-')
start_time = datetime.datetime.time(
datetime.datetime.strptime(time_start, '%H%M'))
end_time = datetime.datetime.time(
datetime.datetime.strptime(time_end, '%H%M'))
self.time_temp.append((start_time, end_time, float(temp)))
self._min_temp = util.convert(config.get("min_temp"), float, 0)
self._max_temp = util.convert(config.get("max_temp"), float, 100)
self._manual_sat_temp = None
self._away = False
self._heater_manual_changed = True
track_state_change(hass, self.heater_entity_id,
self._heater_turned_on,
STATE_OFF, STATE_ON)
track_state_change(hass, self.heater_entity_id,
self._heater_turned_off,
STATE_ON, STATE_OFF)
@property
def name(self):
""" Returns the name. """
return self.name_device
@property
def unit_of_measurement(self):
""" Returns the unit of measurement. """
return TEMP_CELCIUS
@property
def current_temperature(self):
""" Returns the current temperature. """
target_sensor = self.hass.states.get(self.target_sensor_entity_id)
if target_sensor:
return float(target_sensor.state)
else:
return None
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
if self._manual_sat_temp:
return self._manual_sat_temp
elif self._away:
return self.min_temp
else:
now = datetime.datetime.time(datetime.datetime.now())
for (start_time, end_time, temp) in self.time_temp:
if start_time < now and end_time > now:
return temp
return self.min_temp
def set_temperature(self, temperature):
""" Set new target temperature. """
if temperature is None:
self._manual_sat_temp = None
else:
self._manual_sat_temp = float(temperature)
def update(self):
""" Update current thermostat. """
heater = self.hass.states.get(self.heater_entity_id)
if heater is None:
self.logger.error("No heater available")
return
current_temperature = self.current_temperature
if current_temperature is None:
self.logger.error("No temperature available")
return
if (current_temperature - self.target_temperature) > \
TOL_TEMP and heater.state is STATE_ON:
self._heater_manual_changed = False
core.turn_off(self.hass, self.heater_entity_id)
elif (self.target_temperature - self.current_temperature) > TOL_TEMP \
and heater.state is STATE_OFF:
self._heater_manual_changed = False
core.turn_on(self.hass, self.heater_entity_id)
def _heater_turned_on(self, entity_id, old_state, new_state):
""" Heater is turned on. """
if not self._heater_manual_changed:
pass
else:
self.set_temperature(self.max_temp)
self._heater_manual_changed = True
def _heater_turned_off(self, entity_id, old_state, new_state):
""" Heater is turned off. """
if self._heater_manual_changed:
self.set_temperature(None)
@property
def is_away_mode_on(self):
"""
Returns if away mode is on.
"""
return self._away
def turn_away_mode_on(self):
""" Turns away mode on. """
self._away = True
def turn_away_mode_off(self):
""" Turns away mode off. """
self._away = False
@property
def min_temp(self):
""" Return minimum temperature. """
return self._min_temp
@property
def max_temp(self):
""" Return maxmum temperature. """
return self._max_temp
|
mit
|
postlund/home-assistant
|
homeassistant/components/geniushub/sensor.py
|
5
|
3487
|
"""Support for Genius Hub sensor devices."""
from datetime import timedelta
from typing import Any, Dict
from homeassistant.const import DEVICE_CLASS_BATTERY
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from . import DOMAIN, GeniusDevice, GeniusEntity
GH_STATE_ATTR = "batteryLevel"
GH_LEVEL_MAPPING = {
"error": "Errors",
"warning": "Warnings",
"information": "Information",
}
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Set up the Genius Hub sensor entities."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
sensors = [
GeniusBattery(broker, d, GH_STATE_ATTR)
for d in broker.client.device_objs
if GH_STATE_ATTR in d.data["state"]
]
issues = [GeniusIssue(broker, i) for i in list(GH_LEVEL_MAPPING)]
async_add_entities(sensors + issues, update_before_add=True)
class GeniusBattery(GeniusDevice):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, device, state_attr) -> None:
"""Initialize the sensor."""
super().__init__(broker, device)
self._state_attr = state_attr
self._name = f"{device.type} {device.id}"
@property
def icon(self) -> str:
"""Return the icon of the sensor."""
if "_state" in self._device.data: # only for v3 API
interval = timedelta(
seconds=self._device.data["_state"].get("wakeupInterval", 30 * 60)
)
if self._last_comms < dt_util.utcnow() - interval * 3:
return "mdi:battery-unknown"
battery_level = self._device.data["state"][self._state_attr]
if battery_level == 255:
return "mdi:battery-unknown"
if battery_level < 40:
return "mdi:battery-alert"
icon = "mdi:battery"
if battery_level <= 95:
icon += f"-{int(round(battery_level / 10 - 0.01)) * 10}"
return icon
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of the sensor."""
return "%"
@property
def state(self) -> str:
"""Return the state of the sensor."""
level = self._device.data["state"][self._state_attr]
return level if level != 255 else 0
class GeniusIssue(GeniusEntity):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, level) -> None:
"""Initialize the sensor."""
super().__init__()
self._hub = broker.client
self._unique_id = f"{broker.hub_uid}_{GH_LEVEL_MAPPING[level]}"
self._name = f"GeniusHub {GH_LEVEL_MAPPING[level]}"
self._level = level
self._issues = []
@property
def state(self) -> str:
"""Return the number of issues."""
return len(self._issues)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
return {f"{self._level}_list": self._issues}
async def async_update(self) -> None:
"""Process the sensor's state data."""
self._issues = [
i["description"] for i in self._hub.issues if i["level"] == self._level
]
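# Illustrative note (not from the original platform): the icon property above
# maps a reported batteryLevel to an MDI icon roughly as follows:
#   255 -> "mdi:battery-unknown" (and state() reports 0)
#   30  -> "mdi:battery-alert"   (below 40)
#   72  -> "mdi:battery-70"      (round(72 / 10 - 0.01) * 10 == 70)
#   100 -> "mdi:battery"         (above 95, full icon)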
|
apache-2.0
|
pamoakoy/invenio
|
modules/websearch/lib/search_engine_summarizer.py
|
5
|
8693
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Search Engine Summarizer, producing summary formats such as citesummary.
The main API is summarize_records().
"""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
from invenio.config import CFG_INSPIRE_SITE
from invenio.bibrank_citation_searcher import get_cited_by_list
import search_engine
import invenio.template
websearch_templates = invenio.template.load('websearch')
## CFG_CITESUMMARY_COLLECTIONS -- how do we break down cite summary
## results according to collections?
if CFG_INSPIRE_SITE:
CFG_CITESUMMARY_COLLECTIONS = [['All papers', 'collection:citeable'],
['Published only', 'collection:published']]
else:
CFG_CITESUMMARY_COLLECTIONS = [['All papers', ''],
['Published only', 'collection:article']]
## CFG_CITESUMMARY_FAME_THRESHOLDS -- how do we break down cite
## summary results into famous and less famous paper groups?
CFG_CITESUMMARY_FAME_THRESHOLDS = [
(500, 1000000, 'Renowned papers (500+)'),
(250, 499, 'Famous papers (250-499)'),
(100, 249, 'Very well-known papers (100-249)'),
(50, 99, 'Well-known papers (50-99)'),
(10, 49, 'Known papers (10-49)'),
(1, 9, 'Less known papers (1-9)'),
(0, 0, 'Unknown papers (0)')
]
def summarize_records(recids, of, ln, searchpattern="", searchfield="", req=None):
"""Write summary report for records RECIDS in the format OF in language LN.
SEARCHPATTERN and SEARCHFIELD are search query that led to RECIDS,
for instance p='Smith, Paul' and f='author'. They are used for links.
REQ is the Apache/mod_python request object.
"""
if of == 'hcs':
# this is HTML cite summary
html = []
# 1) hcs prologue:
d_recids = {}
d_total_recs = {}
for coll, colldef in CFG_CITESUMMARY_COLLECTIONS:
if not colldef:
d_recids[coll] = recids
else:
d_recids[coll] = recids & search_engine.search_pattern(p=colldef)
d_total_recs[coll] = len(d_recids[coll])
prologue = websearch_templates.tmpl_citesummary_prologue(d_total_recs, CFG_CITESUMMARY_COLLECTIONS, searchpattern, searchfield, ln)
if not req:
html.append(prologue)
elif hasattr(req, "write"):
req.write(prologue)
# 2) hcs overview:
d_recid_citers = {}
d_total_cites = {}
d_avg_cites = {}
d_recid_citecount_l = {}
for coll, colldef in CFG_CITESUMMARY_COLLECTIONS:
d_total_cites[coll] = 0
d_avg_cites[coll] = 0
d_recid_citecount_l[coll] = []
d_recid_citers[coll] = get_cited_by_list(d_recids[coll])
for recid, lciters in d_recid_citers[coll]:
if lciters:
d_total_cites[coll] += len(lciters)
d_recid_citecount_l[coll].append((recid, len(lciters)))
if d_total_cites[coll] != 0:
d_avg_cites[coll] = d_total_cites[coll] * 1.0 / d_total_recs[coll]
overview = websearch_templates.tmpl_citesummary_overview(d_total_cites, d_avg_cites, CFG_CITESUMMARY_COLLECTIONS, ln)
if not req:
html.append(overview)
elif hasattr(req, "write"):
req.write(overview)
# 3) hcs break down by fame:
for low, high, fame in CFG_CITESUMMARY_FAME_THRESHOLDS:
d_cites = {}
for coll, colldef in CFG_CITESUMMARY_COLLECTIONS:
d_cites[coll] = 0
for recid, lciters in d_recid_citers[coll]:
numcites = 0
if lciters:
numcites = len(lciters)
if numcites >= low and numcites <= high:
d_cites[coll] += 1
fame_info = websearch_templates.tmpl_citesummary_breakdown_by_fame(d_cites, low, high, fame, CFG_CITESUMMARY_COLLECTIONS, searchpattern, searchfield, ln)
if not req:
html.append(fame_info)
elif hasattr(req, "write"):
req.write(fame_info)
# 4) hcs calculate h index
d_h_factors = {}
def comparator(x, y):
if x[1] > y[1]:
return -1
elif x[1] == y[1]:
return 0
else: return +1
for coll, colldef in CFG_CITESUMMARY_COLLECTIONS:
d_h_factors[coll] = 0
d_recid_citecount_l[coll].sort(cmp=comparator)
#req.write(repr(d_recid_citecount_l[coll])) # DEBUG
for citecount in d_recid_citecount_l[coll]:
d_h_factors[coll] += 1
if d_h_factors[coll] > citecount[1]:
d_h_factors[coll] -= 1
break
h_idx = websearch_templates.tmpl_citesummary_h_index(d_h_factors, CFG_CITESUMMARY_COLLECTIONS, ln)
if not req:
html.append(h_idx)
elif hasattr(req, "write"):
req.write(h_idx)
# 5) hcs epilogue:
epilogue = websearch_templates.tmpl_citesummary_epilogue(ln)
if not req:
html.append(epilogue)
elif hasattr(req, "write"):
req.write(epilogue)
if not req:
return "\n".join(html)
else:
return ''
elif of == 'xcs':
# this is XML cite summary
citedbylist = get_cited_by_list(recids)
return print_citation_summary_xml(citedbylist)
#for citation summary, code xcs/hcs (unless changed)
def print_citation_summary_xml(citedbylist):
"""Prints citation summary in xml."""
alldict = calculate_citations(citedbylist)
avgstr = str(alldict['avgcites'])
totalcites = str(alldict['totalcites'])
#format avg so that it does not span 10 digits
avgstr = avgstr[0:4]
reciddict = alldict['reciddict']
#output formatting
outp = "<citationsummary records=\""+str(len(citedbylist))
outp += "\" citations=\""+str(totalcites)+"\">"
for low, high, name in CFG_CITESUMMARY_FAME_THRESHOLDS:
#get the name, print the value
if reciddict.has_key(name):
recs = reciddict[name]
outp += "<citationclass>"+name
outp += "<records>"+str(recs)+"</records>"
outp += "</citationclass>\n"
outp = outp + "</citationsummary>"
#req.write(outp)
return outp #just to return something
def calculate_citations(citedbylist):
"""calculates records in classes of citations
defined by thresholds. returns a dictionary that
contains total, avg, records and a dictionary
of threshold names and number corresponding to it"""
totalcites = 0
avgcites = 0
reciddict = {}
for recid, cites in citedbylist:
numcites = 0
if cites:
numcites = len(cites)
totalcites = totalcites + numcites
#take the numbers in CFG_CITESUMMARY_FAME_THRESHOLDS
for low, high, name in CFG_CITESUMMARY_FAME_THRESHOLDS:
if (numcites >= low) and (numcites <= high):
if reciddict.has_key(name):
tmp = reciddict[name]
tmp.append(recid)
reciddict[name] = tmp
else:
reciddict[name] = [recid]
if (len(citedbylist) == 0):
avgcites = 0
else:
avgcites = totalcites*1.0/len(citedbylist)
#create a dictionary that contains all the values
alldict = {}
alldict['records'] = len(citedbylist)
alldict['totalcites'] = totalcites
alldict['avgcites'] = avgcites
alldict['reciddict'] = reciddict
return alldict
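# Worked example (illustrative only, not part of the original module): in the
# h-index loop of summarize_records, citation counts sorted in descending order
# [10, 8, 5, 4, 3] give d_h_factors = 1, 2, 3, 4, then 5 > 3 triggers the
# decrement and break, so the reported h-index is 4.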
|
gpl-2.0
|
kvar/ansible
|
test/units/modules/network/check_point/test_cp_mgmt_package_facts.py
|
19
|
2835
|
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_package_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'package'
api_call_object_plural_version = 'packages'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointPackageFacts(object):
module = cp_mgmt_package_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
gpl-3.0
|
r3kall/AnimeRecommenderSystem
|
animerecommendersystem/recommender_systems/CollaborativeFilteringRS.py
|
1
|
8293
|
"""
This file offers a recommendation system based on the collaborative filtering technique.
1) Let U be the user we want to give recommendations to, for each user U2 != U we need to compute distance(U, U2) (*)
and get the top K neighbors. These neighbors should have watched a lot of animes also watched by U,
giving to them similar rates.
2) Once we have these K neighbors, we compute an aggregate rate to the animes they watched
by using the rates given by them (excluding the ones already watched by U, obviously).
In other words, we try to return as recommendations the animes watched by most of the neighbors
and with a high rate by (almost) all of them.
(*)HOW DO WE COMPUTE THE DISTANCE BETWEEN U AND U2?
Idea: cosine similarity
In particular, for each anime watched by both users, we compute the product of their rates.
"""
import math
from collections import defaultdict
from animerecommendersystem.utils import definitions
STD_NUM_NEIGHBORS = 5
STD_NUM_RECOMM = 10
AVG_NEAREST_DISTANCE = 0.50
RELAX_RATIO = 1.1
# Constants for vote prediction
MAX_PREDICT_RATE = 10.
MIN_PREDICT_RATE = 3.
class CollaborativeFilteringRS:
def __init__(self, users_anime_lists, num_neighbors=STD_NUM_NEIGHBORS,
num_recommendations=STD_NUM_RECOMM, approx=True):
self.users_anime_lists = users_anime_lists
self.num_neighbors = num_neighbors
self.num_recommendations = num_recommendations
self.approx = approx
def compute_distance(self, username1, username2):
# Take the list of animes for each user
user1_animes = self.users_anime_lists[username1]
user2_animes = self.users_anime_lists[username2]
distance_numerator = 0
square_sum_1 = 0
square_sum_2 = 0
# Create a set that contains animes watched by at least one of the user.
total_set_animes = set(user1_animes['list'].keys())
total_set_animes |= set(user2_animes['list'].keys())
for anime in total_set_animes:
watched1 = False
watched2 = False
user1_rate = 0
user2_rate = 0
if anime in user1_animes['list'].keys():
watched1 = True
user1_rate = user1_animes['list'][anime]['rate']
if user1_rate == 0:
user1_rate = self.estimate_rate(user1_animes, anime)
square_sum_1 += user1_rate * user1_rate
if anime in user2_animes['list'].keys():
watched2 = True
user2_rate = user2_animes['list'][anime]['rate']
if user2_rate == 0:
user2_rate = self.estimate_rate(user2_animes, anime)
square_sum_2 += user2_rate * user2_rate
# If both users' lists contain this anime, then we need to increase the similarity
if watched1 and watched2:
distance_numerator += user1_rate * user2_rate
# At the end, use the values collected so far to compute the distance between users.
distance_denominator = math.sqrt(square_sum_1) * math.sqrt(square_sum_2)
similarity = distance_numerator / distance_denominator
distance = 1 - similarity
return distance
@staticmethod
def estimate_rate(user_animes, anime):
neighbor_rate = user_animes['mean_rate']
if neighbor_rate == 0:
anime_state = user_animes['list'][anime]['curr_state']
if anime_state == definitions.COMPLETED:
neighbor_rate = definitions.COMPLETED_RATE
elif anime_state == definitions.WATCHING:
neighbor_rate = definitions.WATCHING_RATE
elif anime_state == definitions.DROPPED:
neighbor_rate = definitions.DROPPED_RATE
elif anime_state == definitions.PLANNED:
neighbor_rate = definitions.PLANNED_RATE
elif anime_state == definitions.ON_HOLD:
neighbor_rate = definitions.ON_HOLD_RATE
return neighbor_rate
def get_neighbors(self, user):
if self.approx is True:
return self.get_approx_neighbors(user)
else:
return self.get_exact_neighbors(user)
def get_approx_neighbors(self, user):
"""
Basic idea: compute distance between 'username''s list and all other users, and pick the nearest ones.
=> PROBLEM: TOO SLOW.
=> SOLUTION: no need to pick the nearest ones; reasonably near users are still ok.
"""
neighbors = defaultdict(float)
how_many_good = 0
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
neighbors[user2] = distance
# If this user is close enough to our target, then we take him as a neighbor
if distance <= AVG_NEAREST_DISTANCE * RELAX_RATIO:
how_many_good += 1
if how_many_good == self.num_neighbors:
break
# Sort neighbors according to distance, and return them
sorted_neighbors = sorted(neighbors, key=neighbors.get, reverse=False)
# return a dict, so we have also the similarity as info
res = dict()
for neighbor in sorted_neighbors[0:self.num_neighbors]:
# similarity
res[neighbor] = 1 - neighbors[neighbor]
return res
def get_exact_neighbors(self, user):
distances_dict = defaultdict(float)
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
distances_dict[user2] = distance
# Once we have all distances, sort the dict by value and return a list containing
# the usernames of the nearest ones.
sorted_neighbors = sorted(distances_dict, key=distances_dict.get, reverse=False)
return sorted_neighbors[0:self.num_neighbors]
def get_recommendations(self, user):
neighbors_dict = self.get_neighbors(user)
predictions_rates_dict = defaultdict(float)
predictions_rates_num_dict = dict()
predictions_rates_den_dict = dict()
user_animes = self.users_anime_lists[user]
for neighbor in neighbors_dict.keys():
neighbor_animes = self.users_anime_lists[neighbor]
for anime in neighbor_animes['list'].keys():
if anime not in user_animes['list'].keys():
neighbor_rate = neighbor_animes['list'][anime]['rate']
if neighbor_rate > 0:
predictions_rates_num_dict[anime] = predictions_rates_num_dict.get(anime, 0) + \
neighbors_dict[neighbor] * \
(neighbor_rate - self.users_anime_lists[neighbor]['mean_rate'])
predictions_rates_den_dict[anime] = predictions_rates_den_dict.get(anime, 0) + neighbors_dict[
neighbor]
for anime in predictions_rates_num_dict.keys():
if predictions_rates_den_dict[anime] == 0:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate']
else:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate'] + \
(float(predictions_rates_num_dict[anime]) / float(
predictions_rates_den_dict[anime]))
if predictions_rates_dict[anime] < MIN_PREDICT_RATE:
predictions_rates_dict[anime] = MIN_PREDICT_RATE
elif predictions_rates_dict[anime] > MAX_PREDICT_RATE:
predictions_rates_dict[anime] = MAX_PREDICT_RATE
sorted_animes = sorted(predictions_rates_dict, key=predictions_rates_dict.get, reverse=True)
results = dict()
for anime in sorted_animes[0:self.num_recommendations]:
results[anime] = predictions_rates_dict[anime]
return results
|
gpl-3.0
|
dhalleine/tensorflow
|
tensorflow/tools/dist_test/python/mnist_replica.py
|
7
|
10321
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on two parameter servers (ps), while the
ops are defined on a worker node. The TF sessions also run on the worker
node.
Multiple invocations of this script can be done in parallel, with different
values for --worker_index. There should be exactly one invocation with
--worker_index=0, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker are visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("worker_index", 0,
"Worker task index, should be >= 0. worker_index=0 is "
"the master worker task the performs the variable "
"initialization ")
flags.DEFINE_integer("num_workers", None,
"Total number of workers (must be >= 1)")
flags.DEFINE_integer("num_parameter_servers", 2,
"Total number of parameter servers (must be >= 1)")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update"
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("grpc_port", 2222,
"TensorFlow GRPC port")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_string("worker_grpc_url", None,
"Worker GRPC URL (e.g., grpc://1.2.3.4:2222, or "
"grpc://tf-worker0:2222)")
flags.DEFINE_boolean("sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
PARAM_SERVER_PREFIX = "tf-ps" # Prefix of the parameter servers' domain names
WORKER_PREFIX = "tf-worker" # Prefix of the workers' domain names
def get_device_setter(num_parameter_servers, num_workers):
"""Get a device setter given number of servers in the cluster.
Given the numbers of parameter servers and workers, construct a device
setter object using ClusterSpec.
Args:
num_parameter_servers: Number of parameter servers
num_workers: Number of workers
Returns:
Device setter object.
"""
ps_spec = []
for j in range(num_parameter_servers):
ps_spec.append("%s%d:%d" % (PARAM_SERVER_PREFIX, j, FLAGS.grpc_port))
worker_spec = []
for k in range(num_workers):
worker_spec.append("%s%d:%d" % (WORKER_PREFIX, k, FLAGS.grpc_port))
cluster_spec = tf.train.ClusterSpec({
"ps": ps_spec,
"worker": worker_spec})
# Get device setter from the cluster spec
return tf.train.replica_device_setter(cluster=cluster_spec)
def main(unused_argv):
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
print("Worker GRPC URL: %s" % FLAGS.worker_grpc_url)
print("Worker index = %d" % FLAGS.worker_index)
print("Number of workers = %d" % FLAGS.num_workers)
# Sanity check on the number of workers and the worker index
if FLAGS.worker_index >= FLAGS.num_workers:
raise ValueError("Worker index %d exceeds number of workers %d " %
(FLAGS.worker_index, FLAGS.num_workers))
# Sanity check on the number of parameter servers
if FLAGS.num_parameter_servers <= 0:
raise ValueError("Invalid num_parameter_servers value: %d" %
FLAGS.num_parameter_servers)
is_chief = (FLAGS.worker_index == 0)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = FLAGS.num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
# Construct device setter object
device_setter = get_device_setter(FLAGS.num_parameter_servers,
FLAGS.num_workers)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
with tf.device(device_setter):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS), name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal([FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.worker_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ *
tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=FLAGS.num_workers,
replica_id=FLAGS.worker_index,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy,
global_step=global_step)
if FLAGS.sync_replicas and is_chief:
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
init_tokens_op = opt.get_init_tokens_op()
init_op = tf.initialize_all_variables()
train_dir = tempfile.mkdtemp()
sv = tf.train.Supervisor(is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.worker_index])
# The chief worker (worker_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.worker_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.worker_index)
sess = sv.prepare_or_wait_for_session(FLAGS.worker_grpc_url,
config=sess_config)
print("Worker %d: Session initialization complete." % FLAGS.worker_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op
print("Starting chief queue runner and running init_tokens_op")
sv.start_queue_runners(sess, [chief_queue_runner])
sess.run(init_tokens_op)
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
while True:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs,
y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.worker_index, local_step, step))
if step >= FLAGS.train_steps:
break
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images,
y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
if __name__ == "__main__":
tf.app.run()
|
apache-2.0
|
inovtec-solutions/OpenERP
|
openerp/addons/plugin/plugin_handler.py
|
11
|
8035
|
'''
Created on 18 oct. 2011
@author: openerp
'''
from openerp.osv import osv
from openerp.tools.translate import _
class plugin_handler(osv.osv_memory):
_name = 'plugin.handler'
def _make_url(self, cr, uid, res_id, model, context=None):
"""
@param res_id: on which document the message is pushed
@param model: name of the document linked with the mail
@return url
"""
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
if base_url:
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
base_url += '/login?db=%s&login=%s&key=%s#id=%s&model=%s' % (cr.dbname, user.login, user.password, res_id, model)
return base_url
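# Illustrative only (database name, login and id are made-up values): with the
# default web.base.url this returns something like
#   http://localhost:8069/login?db=mydb&login=admin&key=<password>#id=42&model=res.partner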
def is_installed(self, cr, uid):
return True
def partner_get(self, cr, uid, address_email):
partner_obj = self.pool.get('res.partner')
partner_ids = partner_obj.search(cr, uid, [('email', 'like', address_email)])
res_id = partner_ids and partner_ids[0] or 0
url = self._make_url(cr, uid, res_id, 'res.partner')
return ('res.partner', res_id, url)
def document_get(self, cr, uid, email):
"""
@param email: email is a standard RFC2822 email message
@return Dictionary which contain id and the model name of the document linked with the mail
if no document is found the id = 0
(model_name, res_id, url, name_get)
"""
mail_message_obj = self.pool.get('mail.message')
model = ""
res_id = 0
url = ""
name = ""
msg = self.pool.get('mail.thread').message_parse(cr, uid, email)
parent_id = msg.get('parent_id', False)
message_id = msg.get('message_id')
msg_id = False
if message_id:
msg_ids = mail_message_obj.search(cr, uid, [('message_id', '=', message_id)])
msg_id = len(msg_ids) and msg_ids[0] or False
if not msg_id and parent_id:
msg_id = parent_id
if msg_id:
msg = mail_message_obj.browse(cr, uid, msg_id)
res_id = msg.res_id
model = msg.model
url = self._make_url(cr, uid, res_id, model)
name = self.pool.get(model).name_get(cr, uid, [res_id])[0][1]
return (model, res_id, url, name)
def document_type(self, cr, uid, context=None):
"""
Return the list of available models to push
res.partner is a special case
otherwise all models that inherit from mail.thread
['res.partner', 'project.issue']
"""
mail_thread_obj = self.pool.get('mail.thread')
doc_dict = mail_thread_obj.message_capable_models(cr, uid, context)
doc_dict['res.partner'] = "Partner"
return doc_dict.items()
# Can be used where search record was used
def list_document_get(self, cr, uid, model, name):
"""
This function return the result of name_search on the object model
@param model: the name of the model
@param name: the name of the document
@return : the result of name_search a list of tuple
[(id, 'name')]
"""
return self.pool.get(model).name_search(cr, uid, name)
def push_message(self, cr, uid, model, email, res_id=0):
"""
@param email: email is a standard RFC2822 email message
@param model: On which model the message is pushed
@param res_id: on which document the message is pushed, if res_id = 0 a new document is created
@return Dictionary which contain model , url and resource id.
"""
mail_message = self.pool.get('mail.message')
model_obj = self.pool.get(model)
msg = self.pool.get('mail.thread').message_parse(cr, uid, email)
message_id = msg.get('message-id')
mail_ids = mail_message.search(cr, uid, [('message_id', '=', message_id), ('res_id', '=', res_id), ('model', '=', model)])
if message_id and mail_ids:
mail_record = mail_message.browse(cr, uid, mail_ids)[0]
res_id = mail_record.res_id
notify = _("Email already pushed")
elif res_id == 0:
if model == 'res.partner':
notify = _('Use the Partner button to create a new partner')
else:
res_id = model_obj.message_process(cr, uid, model, email)
notify = _("Mail successfully pushed, a new %s has been created.") % model
else:
model_obj.message_post(cr, uid, [res_id],
body=msg.get('body'),
subject=msg.get('subject'),
type='comment' if model == 'res.partner' else 'email',
parent_id=msg.get('parent_id'),
attachments=msg.get('attachments'))
notify = _("Mail successfully pushed")
url = self._make_url(cr, uid, res_id, model)
return (model, res_id, url, notify)
def contact_create(self, cr, uid, data, partner_id, context=None):
"""
@param data : the data used to create the res.partner
[('field_name', value)], field name is required
@param partner_id : On which partner the address is attached
if partner_id = 0 then create a new partner with the same name that the address
@return : the partner_id sent or created; this allows the plugin to open the right partner page
"""
partner_obj = self.pool.get('res.partner')
dictcreate = dict(data)
if partner_id:
is_company = partner_obj.browse(cr, uid, partner_id, context=context).is_company
if is_company:
dictcreate['parent_id'] = partner_id
partner_id = partner_obj.create(cr, uid, dictcreate)
url = self._make_url(cr, uid, partner_id, 'res.partner')
return ('res.partner', partner_id, url)
# Specific to Outlook: rfc822 is not available, so we split it into arguments headers, body, attachments
def push_message_outlook(self, cr, uid, model, headers, res_id=0, body=False, body_html=False, attachments=False):
# ----------------------------------------
# solution 1
# construct a fake rfc822 from the separated arguement
#m = email.asdfsadf
# use the push_message method
#self.push_message(m)
# ----------------------------------------
# solution 2
# use self.pushmessage only with header and body
# add attachments yourself afterwards
mail_message = self.pool.get('mail.message')
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = []
msg = self.pool.get('mail.thread').message_parse(cr, uid, headers)
message_id = msg.get('message-id')
push_mail = self.push_message(cr, uid, model, headers, res_id)
res_id = push_mail[1]
model = push_mail[0]
notify = push_mail[3]
for name in attachments.keys():
attachment_ids = ir_attachment_obj.search(cr, uid, [('res_model', '=', model), ('res_id', '=', res_id), ('datas_fname', '=', name)])
if attachment_ids:
attach_ids.append(attachment_ids[0])
else:
vals = {"res_model": model, "res_id": res_id, "name": name, "datas": attachments[name], "datas_fname": name}
attach_ids.append(ir_attachment_obj.create(cr, uid, vals))
mail_ids = mail_message.search(cr, uid, [('message_id', '=', message_id), ('res_id', '=', res_id), ('model', '=', model)])
if mail_ids:
mail_message.write(cr, uid, mail_ids[0], {'attachment_ids': [(6, 0, attach_ids)], 'body': body, 'body_html': body_html})
url = self._make_url(cr, uid, res_id, model)
return (model, res_id, url, notify)
|
agpl-3.0
|
icereval/scrapi
|
scrapi/harvesters/stepic.py
|
1
|
2795
|
"""
Stepic.org harvester of MOOC-online courses for SHARE Notification Service
Example API query: https://stepic.org:443/api/lessons/100
"""
from __future__ import unicode_literals
import json
import pycountry
from dateutil.parser import parse
from scrapi import requests
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
def process_owner(owners_id):
resp = requests.get("https://stepic.org/api/users/" + str(owners_id)).json()
try:
person = resp[u'users'][0]
except KeyError:
person = {u'first_name': '', u'last_name': ''}
owner = {
'name': " ".join([person[u'first_name'], person[u'last_name']]),
'givenName': person[u'first_name'],
'additionalName': '',
'familyName': person[u'last_name'],
'email': '',
'sameAs': [],
}
return [owner]
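# Illustrative only (made-up user): a response {"users": [{"first_name": "Ada",
# "last_name": "Lovelace"}]} yields
#   [{'name': 'Ada Lovelace', 'givenName': 'Ada', 'additionalName': '',
#     'familyName': 'Lovelace', 'email': '', 'sameAs': []}]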
class StepicHarvester(JSONHarvester):
short_name = 'stepic'
long_name = 'Stepic.org Online Education Platform'
url = 'http://www.stepic.org'
count = 0
URL = 'https://stepic.org/api/lessons'
@property
def schema(self):
return {
'contributors': ('/owner', process_owner),
'uris': {
'canonicalUri': ('/id', lambda x: self.url + '/' + str(x))
},
'title': '/title',
'providerUpdatedDateTime': ('/update_date', lambda x: parse(x).isoformat()),
'description': '/title',
'languages': ('/language', lambda x: [pycountry.languages.get(alpha2=x).terminology])
}
def harvest(self, start_date=None, end_date=None):
# TODO - stepic has no means of querying by date, we should add handling for the
# start and end date once it does.
search_url = self.URL
records = self.get_records(search_url)
record_list = []
for record in records:
doc_id = record['id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': self.short_name,
'docID': ('stepic_doc' + str(doc_id)).decode('utf-8'),
'filetype': 'json'
}
)
)
return record_list
def get_records(self, search_url):
all_lessons = []
resp = requests.get(self.URL + '?page=last').json()
last_lesson_id = resp['lessons'][-1]['id']
for pk in range(last_lesson_id + 1):
lesson = requests.get(search_url + "/" + str(pk), expected=[200, 403, 404])
if lesson.status_code == 200:
lesson_list = lesson.json()['lessons'][0]
all_lessons.append(lesson_list)
return all_lessons
|
apache-2.0
|
lepistone/purchase-workflow
|
purchase_fiscal_position_update/__openerp__.py
|
9
|
1882
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Purchase Fiscal Position Update module for Odoo
# Copyright (C) 2011-2014 Julius Network Solutions SARL <contact@julius.fr>
# Copyright (C) 2014 Akretion (http://www.akretion.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Fiscal Position Update',
'version': '8.0.1.0.0',
'category': 'Purchase Management',
'license': 'AGPL-3',
'summary': 'Changing the fiscal position of a purchase order will '
'auto-update purchase order lines',
'description': """
Purchase Fiscal Position Update
===============================
With this module, when a user changes the fiscal position of a purchase order,
the taxes on all the purchase order lines which have a product are
automatically updated. The purchase order lines without a product are not
updated and a warning is displayed to the user in this case.
""",
'author': "Julius Network Solutions,"
"Akretion,"
"Odoo Community Association (OCA)",
'depends': ['purchase'],
'data': [],
'installable': True,
}
|
agpl-3.0
|
eblot/micropython
|
tools/file2h.py
|
103
|
1126
|
# Reads in a text file, and performs the necessary escapes so that it
# can be #included as a static string like:
# static const char string_from_textfile[] =
# #include "build/textfile.h"
# ;
# This script simply prints the escaped string straight to stdout
from __future__ import print_function
import sys
# Can either be set explicitly, or left blank to auto-detect
# Except auto-detect doesn't work because the file has been passed
# through Python text processing, which makes all EOL a \n
line_end = '\\r\\n'
if __name__ == "__main__":
filename = sys.argv[1]
for line in open(filename, 'r').readlines():
if not line_end:
for ending in ('\r\n', '\r', '\n'):
if line.endswith(ending):
line_end = ending.replace('\r', '\\r').replace('\n', '\\n')
break
if not line_end:
raise Exception("Couldn't auto-detect line-ending of %s" % filename)
line = line.rstrip('\r\n')
line = line.replace('\\', '\\\\')
line = line.replace('"', '\\"')
print('"%s%s"' % (line, line_end))
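# Illustrative only: an input line containing Hello "world" is emitted as
#   "Hello \"world\"\r\n"
# ready to be concatenated into the static C string by the #include trick above.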
|
mit
|