repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ysekky/GPy | GPy/testing/pep_tests.py | copies: 6 | size: 3437 |
# Copyright (c) 2014, James Hensman, 2016, Thang Bui
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class PEPgradienttest(unittest.TestCase):
def setUp(self):
######################################
# # 1 dimensional example
np.random.seed(10)
N = 20
# sample inputs and outputs
self.X1D = np.random.uniform(-3., 3., (N, 1))
self.Y1D = np.sin(self.X1D) + np.random.randn(N, 1) * 0.05
######################################
# # 2 dimensional example
# sample inputs and outputs
self.X2D = np.random.uniform(-3., 3., (N, 2))
self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(N, 1) * 0.05
#######################################
# # more datapoints; check that in the alpha limits the log marginal
# # likelihood is consistent with FITC and VFE/Var_DTC
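# # (Exercised by the consistency tests below: PEP with alpha close to 0 is
# # compared against VarDTC/VFE, and PEP with alpha = 1 against FITC.)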
M = 5
np.random.seed(42)
self.X1 = np.c_[np.linspace(-1., 1., N)]
self.Y1 = np.sin(self.X1) + np.random.randn(N, 1) * 0.05
self.kernel = GPy.kern.RBF(input_dim=1, lengthscale=0.5, variance=1)
self.Z = np.random.uniform(-1, 1, (M, 1))
self.lik_noise_var = 0.01
def test_pep_1d_gradients(self):
m = GPy.models.SparseGPRegression(self.X1D, self.Y1D)
m.inference_method = GPy.inference.latent_function_inference.PEP(alpha=np.random.rand())
self.assertTrue(m.checkgrad())
def test_pep_2d_gradients(self):
m = GPy.models.SparseGPRegression(self.X2D, self.Y2D)
m.inference_method = GPy.inference.latent_function_inference.PEP(alpha=np.random.rand())
self.assertTrue(m.checkgrad())
def test_pep_vfe_consistency(self):
vfe_model = GPy.models.SparseGPRegression(
self.X1,
self.Y1,
kernel=self.kernel,
Z=self.Z
)
vfe_model.inference_method = GPy.inference.latent_function_inference.VarDTC()
vfe_model.Gaussian_noise.variance = self.lik_noise_var
vfe_lml = vfe_model.log_likelihood()
pep_model = GPy.models.SparseGPRegression(
self.X1,
self.Y1,
kernel=self.kernel,
Z=self.Z
)
pep_model.inference_method = GPy.inference.latent_function_inference.PEP(alpha=1e-5)
pep_model.Gaussian_noise.variance = self.lik_noise_var
pep_lml = pep_model.log_likelihood()
self.assertAlmostEqual(vfe_lml[0, 0], pep_lml[0], delta=abs(0.01*pep_lml[0]))
def test_pep_fitc_consistency(self):
fitc_model = GPy.models.SparseGPRegression(
self.X1D,
self.Y1D,
kernel=self.kernel,
Z=self.Z
)
fitc_model.inference_method = GPy.inference.latent_function_inference.FITC()
fitc_model.Gaussian_noise.variance = self.lik_noise_var
fitc_lml = fitc_model.log_likelihood()
pep_model = GPy.models.SparseGPRegression(
self.X1D,
self.Y1D,
kernel=self.kernel,
Z=self.Z
)
pep_model.inference_method = GPy.inference.latent_function_inference.PEP(alpha=1)
pep_model.Gaussian_noise.variance = self.lik_noise_var
pep_lml = pep_model.log_likelihood()
self.assertAlmostEqual(fitc_lml, pep_lml[0], delta=abs(0.001*pep_lml[0]))
| license: bsd-3-clause
gennad/Django-nonrel-stub-for-Google-App-Engine | django/contrib/staticfiles/management/commands/findstatic.py | copies: 244 | size: 1230 |
import os
from optparse import make_option
from django.core.management.base import LabelCommand
from django.utils.encoding import smart_str, smart_unicode
from django.contrib.staticfiles import finders
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
args = "[file ...]"
label = 'static file'
option_list = LabelCommand.option_list + (
make_option('--first', action='store_false', dest='all', default=True,
help="Only return the first match for each static file."),
)
def handle_label(self, path, **options):
verbosity = int(options.get('verbosity', 1))
result = finders.find(path, all=options['all'])
path = smart_unicode(path)
if result:
if not isinstance(result, (list, tuple)):
result = [result]
output = u'\n '.join(
(smart_unicode(os.path.realpath(path)) for path in result))
self.stdout.write(
smart_str(u"Found '%s' here:\n %s\n" % (path, output)))
else:
if verbosity >= 1:
self.stderr.write(
smart_str("No matching file found for '%s'.\n" % path))
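# Illustrative usage of this management command (not part of the original
# file); the static file names below are placeholders:
#   python manage.py findstatic css/base.css admin/js/core.js
#   python manage.py findstatic css/base.css --first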
| license: bsd-3-clause
ChinaMassClouds/copenstack-server | openstack/src/ceilometer-2014.2.2/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py | copies: 6 | size: 1118 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import timeutils
import sqlalchemy
from ceilometer.storage.sqlalchemy import models
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
meter = sqlalchemy.Table('meter', meta, autoload=True)
c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(),
default=timeutils.utcnow)
meter.create_column(c)
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
meter = sqlalchemy.Table('meter', meta, autoload=True)
meter.drop_column('recorded_at')
| license: gpl-2.0
ericzolf/ansible | lib/ansible/plugins/filter/core.py | copies: 4 | size: 21595 |
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import time
import uuid
import yaml
import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.six import iteritems, string_types, integer_types, reraise, text_type
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common._collections_compat import Mapping
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.template import recursive_check_defined
from ansible.utils.display import Display
from ansible.utils.encrypt import passlib_or_crypt
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
display = Display()
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
default_flow_style = kw.pop('default_flow_style', None)
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
'''Make verbose, human readable JSON'''
return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(string, format)
def strftime(string_format, second=None):
''' return a date string formatted with string_format. See https://docs.python.org/2/library/time.html#time.strftime for the format syntax '''
if second is not None:
try:
second = float(second)
except Exception:
raise AnsibleFilterError('Invalid value for epoch value (%s)' % second)
return time.strftime(string_format, time.localtime(second))
def quote(a):
''' return its argument quoted for shell usage '''
if a is None:
a = u''
return shlex_quote(to_text(a))
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [g for g in glob.glob(pathname) if os.path.isfile(g)]
def regex_replace(value='', pattern='', replacement='', ignorecase=False, multiline=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
def ternary(value, true_val, false_val, none_val=None):
''' value ? true_val : false_val '''
if value is None and none_val is not None:
return none_val
elif bool(value):
return true_val
else:
return false_val
def regex_escape(string, re_type='python'):
'''Escape all regular expression special characters from STRING.'''
string = to_text(string, errors='surrogate_or_strict', nonstring='simplerepr')
if re_type == 'python':
return re.escape(string)
elif re_type == 'posix_basic':
# list of BRE special chars:
# https://en.wikibooks.org/wiki/Regular_Expressions/POSIX_Basic_Regular_Expressions
return regex_replace(string, r'([].[^$*\\])', r'\\\1')
# TODO: implement posix_extended
# It's similar to, but different from python regex, which is similar to,
# but different from PCRE. It's possible that re.escape would work here.
# https://remram44.github.io/regex-cheatsheet/regex.html#programs
elif re_type == 'posix_extended':
raise AnsibleFilterError('Regex type (%s) not yet implemented' % re_type)
else:
raise AnsibleFilterError('Invalid regex type (%s)' % re_type)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
def from_yaml_all(data):
if isinstance(data, string_types):
return yaml.safe_load_all(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except Exception:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try:
h = hashlib.new(hashtype)
except Exception as e:
# hash is not supported?
raise AnsibleFilterError(e)
h.update(to_bytes(data, errors='surrogate_or_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=None, rounds=None):
passlib_mapping = {
'md5': 'md5_crypt',
'blowfish': 'bcrypt',
'sha256': 'sha256_crypt',
'sha512': 'sha512_crypt',
}
hashtype = passlib_mapping.get(hashtype, hashtype)
try:
return passlib_or_crypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds)
except AnsibleError as e:
reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
uuid_namespace = namespace
if not isinstance(uuid_namespace, uuid.UUID):
try:
uuid_namespace = uuid.UUID(namespace)
except (AttributeError, ValueError) as e:
raise AnsibleFilterError("Invalid value '%s' for 'namespace': %s" % (to_native(namespace), to_native(e)))
# uuid.uuid5() requires bytes on Python 2 and bytes or text on Python 3
return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
def mandatory(a, msg=None):
''' Make a variable mandatory '''
from jinja2.runtime import Undefined
if isinstance(a, Undefined):
if a._undefined_name is not None:
name = "'%s' " % to_text(a._undefined_name)
else:
name = ''
if msg is not None:
raise AnsibleFilterError(to_native(msg))
else:
raise AnsibleFilterError("Mandatory variable %s not defined." % name)
return a
def combine(*terms, **kwargs):
recursive = kwargs.pop('recursive', False)
list_merge = kwargs.pop('list_merge', 'replace')
if kwargs:
raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments")
# allow the user to do `[dict1, dict2, ...] | combine`
dictionaries = flatten(terms, levels=1)
# recursively check that every element is defined (for jinja2)
recursive_check_defined(dictionaries)
if not dictionaries:
return {}
if len(dictionaries) == 1:
return dictionaries[0]
# merge all the dicts so that the dict at the end of the array has precedence
# over the dict at the beginning.
# we merge the dicts from the highest to the lowest priority because the
# lowest priority dict is likely to be the biggest in size
# (as the low prio dict will hold the "default" values and the others will be "patches")
# and merge_hash creates a copy of its first argument.
# so high/right -> low/left is more efficient than low/left -> high/right
high_to_low_prio_dict_iterator = reversed(dictionaries)
result = next(high_to_low_prio_dict_iterator)
for dictionary in high_to_low_prio_dict_iterator:
result = merge_hash(dictionary, result, recursive, list_merge)
return result
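# Illustrative sketch (not part of the original module): later dicts win, e.g.
#   combine({'a': 1, 'b': 1}, {'b': 2})      -> {'a': 1, 'b': 2}
#   combine([{'a': 1}, {'b': 2}, {'c': 3}])  -> {'a': 1, 'b': 2, 'c': 3}
# In templates this is typically written as `dict1 | combine(dict2)`.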
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
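# Illustrative sketch (not part of the original module) of the default and 'c'
# comment styles, with results shown as Python string literals:
#   comment('hello\nworld')             -> '#\n# hello\n# world\n#'
#   comment('hello\nworld', style='c')  -> '//\n// hello\n// world\n//'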
@environmentfilter
def extract(environment, item, container, morekeys=None):
if morekeys is None:
keys = [item]
elif isinstance(morekeys, list):
keys = [item] + morekeys
else:
keys = [item, morekeys]
value = container
for key in keys:
value = environment.getitem(value, key)
return value
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
has a repr that prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
The adaptation here, is to run the jinja2 `do_groupby` function, and
cast all of the namedtuples to a regular tuple.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string, encoding='utf-8'):
return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
def b64decode(string, encoding='utf-8'):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
if skip_nulls and element in (None, 'None', 'null'):
# ignore null items
continue
elif is_sequence(element):
if levels is None:
ret.extend(flatten(element, skip_nulls=skip_nulls))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(flatten(element, levels=(int(levels) - 1), skip_nulls=skip_nulls))
else:
ret.append(element)
else:
ret.append(element)
return ret
def subelements(obj, subelements, skip_missing=False):
'''Accepts a dict or list of dicts, and a dotted accessor and produces a product
of the element and the results of the dotted accessor
>>> obj = [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}]
>>> subelements(obj, 'groups')
[({'name': 'alice', 'groups': ['wheel'], 'authorized': ['/tmp/alice/onekey.pub']}, 'wheel')]
'''
if isinstance(obj, dict):
element_list = list(obj.values())
elif isinstance(obj, list):
element_list = obj[:]
else:
raise AnsibleFilterError('obj must be a list of dicts or a nested dict')
if isinstance(subelements, list):
subelement_list = subelements[:]
elif isinstance(subelements, string_types):
subelement_list = subelements.split('.')
else:
raise AnsibleFilterTypeError('subelements must be a list or a string')
results = []
for element in element_list:
values = element
for subelement in subelement_list:
try:
values = values[subelement]
except KeyError:
if skip_missing:
values = []
break
raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
except TypeError:
raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
if not isinstance(values, list):
raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
for value in values:
results.append((element, value))
return results
def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='value'):
''' takes a dictionary and transforms it into a list of dictionaries,
each with 'key' and 'value' entries that correspond to the keys and values of the original '''
if not isinstance(mydict, Mapping):
raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
ret = []
for key in mydict:
ret.append({key_name: key, value_name: mydict[key]})
return ret
def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='value'):
''' takes a list of dicts, each with 'key' and 'value' entries, and transforms the list into a dictionary,
effectively the reverse of dict2items '''
if not is_sequence(mylist):
raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
return dict((item[key_name], item[value_name]) for item in mylist)
def path_join(paths):
''' takes a sequence or a string, and returns a concatenation
of the different members '''
if isinstance(paths, string_types):
return os.path.join(paths)
elif is_sequence(paths):
return os.path.join(*paths)
else:
raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
'from_yaml_all': from_yaml_all,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'expandvars': partial(unicode_wrap, os.path.expandvars),
'path_join': path_join,
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# file glob
'fileglob': fileglob,
# types
'bool': to_bool,
'to_datetime': to_datetime,
# date formatting
'strftime': strftime,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# comment-style decoration
'comment': comment,
# debug
'type_debug': lambda o: o.__class__.__name__,
# Data structures
'combine': combine,
'extract': extract,
'flatten': flatten,
'dict2items': dict_to_list_of_dict_key_value_elements,
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
'split': partial(unicode_wrap, text_type.split),
}
| license: gpl-3.0
gencer/sentry | src/sentry/api/endpoints/organization_teams.py | copies: 2 | size: 4795 |
from __future__ import absolute_import
from django.db import IntegrityError, transaction
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.models import (
AuditLogEntryEvent, OrganizationMember, OrganizationMemberTeam, Team, TeamStatus
)
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('CreateNewTeam')
def create_new_team_scenario(runner):
runner.request(
method='POST',
path='/organizations/%s/teams/' % runner.org.slug,
data={
'name': 'Ancient Gabelers',
}
)
@scenario('ListOrganizationTeams')
def list_organization_teams_scenario(runner):
runner.request(method='GET', path='/organizations/%s/teams/' % runner.org.slug)
# OrganizationPermission + team:write
class OrganizationTeamsPermission(OrganizationPermission):
def __init__(self):
for m in 'POST', 'PUT', 'DELETE':
self.scope_map[m].append('team:write')
class TeamSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
slug = serializers.RegexField(r'^[a-z0-9_\-]+$', max_length=50, required=False)
class OrganizationTeamsEndpoint(OrganizationEndpoint):
permission_classes = (OrganizationTeamsPermission,)
doc_section = DocSection.TEAMS
@attach_scenarios([list_organization_teams_scenario])
def get(self, request, organization):
"""
List an Organization's Teams
````````````````````````````
Return a list of teams bound to an organization.
:pparam string organization_slug: the slug of the organization for
which the teams should be listed.
:auth: required
"""
# TODO(dcramer): this should be system-wide default for organization
# based endpoints
if request.auth and hasattr(request.auth, 'project'):
return Response(status=403)
team_list = list(
Team.objects.filter(
organization=organization,
status=TeamStatus.VISIBLE,
).order_by('name', 'slug')
)
return Response(serialize(team_list, request.user, TeamWithProjectsSerializer()))
@attach_scenarios([create_new_team_scenario])
def post(self, request, organization):
"""
Create a new Team
``````````````````
Create a new team bound to an organization. Only the name of the
team is needed to create it; the slug can be auto-generated.
:pparam string organization_slug: the slug of the organization the
team should be created for.
:param string name: the name of the team.
:param string slug: the optional slug for this team. If
not provided it will be auto-generated from the
name.
:auth: required
"""
serializer = TeamSerializer(data=request.DATA)
if serializer.is_valid():
result = serializer.object
try:
with transaction.atomic():
team = Team.objects.create(
name=result['name'],
slug=result.get('slug'),
organization=organization,
)
except IntegrityError:
return Response(
{
'detail': 'A team with this slug already exists.'
},
status=409,
)
if request.user.is_authenticated():
try:
member = OrganizationMember.objects.get(
user=request.user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
pass
else:
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=member,
)
self.create_audit_entry(
request=request,
organization=organization,
target_object=team.id,
event=AuditLogEntryEvent.TEAM_ADD,
data=team.get_audit_log_data(),
)
return Response(serialize(team, request.user), status=201)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| license: bsd-3-clause
40223149/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_discovery.py | copies: 785 | size: 13838 |
import os
import re
import sys
import unittest
class TestableTestProgram(unittest.TestProgram):
module = '__main__'
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test3.py', 'test4.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
loader.loadTestsFromModule = lambda module: module + ' tests'
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
expected = [name + ' module tests' for name in
('test1', 'test2')]
expected.extend([('test_dir.%s' % name) + ' module tests' for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return 'load_tests'
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
def loadTestsFromModule(module, use_load_tests):
if use_load_tests:
raise self.failureException('use_load_tests should be False for packages')
return module.path + ' module tests'
loader.loadTestsFromModule = loadTestsFromModule
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the test_directory package by calling load_tests
# and directly from the test_directory2 package
self.assertEqual(suite,
['load_tests', 'test_directory2' + ' module tests'])
self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, 'test_directory' + ' module tests', 'test*')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
def do_discovery(argv):
args.extend(argv)
program._do_discovery = do_discovery
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [])
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, ['foo', 'bar'])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
program.module = None
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, [])
program._do_discovery = do_discovery
program.parseArgs(['something'])
self.assertTrue(self.called)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
program.module = None
args = ['something', '-v', '-b', '-v', '-c', '-f']
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, args[1:])
program._do_discovery = do_discovery
program.parseArgs(args)
self.assertTrue(self.called)
def test_command_line_handling_do_discovery_too_many_arguments(self):
class Stop(Exception):
pass
def usageExit():
raise Stop
program = TestableTestProgram()
program.usageExit = usageExit
with self.assertRaises(Stop):
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def test_detect_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
if __name__ == '__main__':
unittest.main()
| license: gpl-3.0
runt18/nupic | tests/unit/nupic/math/nupic_random_test.py | copies: 30 | size: 5529 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""NuPIC random module tests."""
import cPickle as pickle
import unittest
import numpy
from nupic.bindings.math import Random
class TestNupicRandom(unittest.TestCase):
def testNupicRandomPickling(self):
"""Test pickling / unpickling of NuPIC randomness."""
# Simple test: make sure that dumping / loading works...
r = Random(42)
pickledR = pickle.dumps(r)
test1 = [r.getUInt32() for _ in xrange(10)]
r = pickle.loads(pickledR)
test2 = [r.getUInt32() for _ in xrange(10)]
self.assertEqual(test1, test2,
"Simple NuPIC random pickle/unpickle failed.")
# A little trickier: dump / load _after_ some numbers have been generated
# (in the first test). Things should still work...
# ...the idea of this test is to make sure that the pickle code isn't just
# saving the initial seed...
pickledR = pickle.dumps(r)
test3 = [r.getUInt32() for _ in xrange(10)]
r = pickle.loads(pickledR)
test4 = [r.getUInt32() for _ in xrange(10)]
self.assertEqual(
test3, test4,
"NuPIC random pickle/unpickle didn't work for saving later state.")
self.assertNotEqual(test1, test3,
"NuPIC random gave the same result twice?!?")
def testSample(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([2], dtype="uint32")
r.sample(population, choices)
self.assertEqual(choices[0], 1)
self.assertEqual(choices[1], 3)
def testSampleNone(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([0], dtype="uint32")
# Just make sure there is no exception thrown.
r.sample(population, choices)
self.assertEqual(choices.size, 0)
def testSampleAll(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([4], dtype="uint32")
r.sample(population, choices)
self.assertEqual(choices[0], 1)
self.assertEqual(choices[1], 2)
self.assertEqual(choices[2], 3)
self.assertEqual(choices[3], 4)
def testSampleWrongDimensionsPopulation(self):
"""Check that passing a multi-dimensional array throws a ValueError."""
r = Random(42)
population = numpy.array([[1, 2], [3, 4]], dtype="uint32")
choices = numpy.zeros([2], dtype="uint32")
self.assertRaises(ValueError, r.sample, population, choices)
def testSampleWrongDimensionsChoices(self):
"""Check that passing a multi-dimensional array throws a ValueError."""
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([2, 2], dtype="uint32")
self.assertRaises(ValueError, r.sample, population, choices)
def testSampleSequenceRaisesTypeError(self):
"""Check that passing lists throws a TypeError.
This behavior may change if sample is extended to understand sequences.
"""
r = Random(42)
population = [1, 2, 3, 4]
choices = [0, 0]
self.assertRaises(TypeError, r.sample, population, choices)
def testSampleBadDtype(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="int64")
choices = numpy.zeros([2], dtype="int64")
self.assertRaises(TypeError, r.sample, population, choices)
def testSampleDifferentDtypes(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([2], dtype="uint64")
self.assertRaises(ValueError, r.sample, population, choices)
def testSamplePopulationTooSmall(self):
r = Random(42)
population = numpy.array([1, 2, 3, 4], dtype="uint32")
choices = numpy.zeros([5], dtype="uint32")
self.assertRaises(
ValueError, r.sample, population, choices)
def testShuffle(self):
r = Random(42)
arr = numpy.array([1, 2, 3, 4], dtype="uint32")
r.shuffle(arr)
self.assertEqual(arr[0], 1)
self.assertEqual(arr[1], 4)
self.assertEqual(arr[2], 3)
self.assertEqual(arr[3], 2)
def testShuffleEmpty(self):
r = Random(42)
arr = numpy.zeros([0], dtype="uint32")
r.shuffle(arr)
self.assertEqual(arr.size, 0)
def testShuffleMultiDimension(self):
r = Random(42)
arr = numpy.zeros([2, 2], dtype="uint32")
self.assertRaises(ValueError, r.shuffle, arr)
def testShuffleBadDtype(self):
r = Random(42)
arr = numpy.array([1, 2, 3, 4], dtype="int64")
self.assertRaises(ValueError, r.shuffle, arr)
if __name__ == "__main__":
unittest.main()
| license: agpl-3.0
reedt/git | contrib/hooks/multimail/git_multimail.py | copies: 186 | size: 110172 |
#! /usr/bin/env python2
# Copyright (c) 2015 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
# Derived from contrib/hooks/post-receive-email, which is
# Copyright (c) 2007 Andy Parkins
# and also includes contributions by other authors.
#
# This file is part of git-multimail.
#
# git-multimail is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""Generate notification emails for pushes to a git repository.
This hook sends emails describing changes introduced by pushes to a
git repository. For each reference that was changed, it emits one
ReferenceChange email summarizing how the reference was changed,
followed by one Revision email for each new commit that was introduced
by the reference change.
Each commit is announced in exactly one Revision email. If the same
commit is merged into another branch in the same or a later push, then
the ReferenceChange email will list the commit's SHA1 and its one-line
summary, but no new Revision email will be generated.
This script is designed to be used as a "post-receive" hook in a git
repository (see githooks(5)). It can also be used as an "update"
script, but this usage is not completely reliable and is deprecated.
To help with debugging, this script accepts a --stdout option, which
causes the emails to be written to standard output rather than sent
using sendmail.
See the accompanying README file for the complete documentation.
"""
import sys
import os
import re
import bisect
import socket
import subprocess
import shlex
import optparse
import smtplib
import time
try:
from email.utils import make_msgid
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import formatdate
from email.header import Header
except ImportError:
# Prior to Python 2.5, the email module used different names:
from email.Utils import make_msgid
from email.Utils import getaddresses
from email.Utils import formataddr
from email.Utils import formatdate
from email.Header import Header
DEBUG = False
ZEROS = '0' * 40
LOGBEGIN = '- Log -----------------------------------------------------------------\n'
LOGEND = '-----------------------------------------------------------------------\n'
ADDR_HEADERS = set(['from', 'to', 'cc', 'bcc', 'reply-to', 'sender'])
# It is assumed in many places that the encoding is uniformly UTF-8,
# so changing these constants is unsupported. But define them here
# anyway, to make it easier to find (at least most of) the places
# where the encoding is important.
(ENCODING, CHARSET) = ('UTF-8', 'utf-8')
REF_CREATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s created'
' (now %(newrev_short)s)'
)
REF_UPDATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated'
' (%(oldrev_short)s -> %(newrev_short)s)'
)
REF_DELETED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s deleted'
' (was %(oldrev_short)s)'
)
COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s'
)
REFCHANGE_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
Auto-Submitted: auto-generated
"""
REFCHANGE_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a change to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
FOOTER_TEMPLATE = """\
-- \n\
To stop receiving notification emails like this one, please contact
%(administrator)s.
"""
REWIND_ONLY_TEMPLATE = """\
This update removed existing revisions from the reference, leaving the
reference pointing at a previous point in the repository history.
* -- * -- N %(refname)s (%(newrev_short)s)
\\
O -- O -- O (%(oldrev_short)s)
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NON_FF_TEMPLATE = """\
This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
%(refname_type)s are not in the new version. This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:
* -- * -- B -- O -- O -- O (%(oldrev_short)s)
\\
N -- N -- N %(refname)s (%(newrev_short)s)
You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NO_NEW_REVISIONS_TEMPLATE = """\
No new revisions were added by this update.
"""
DISCARDED_REVISIONS_TEMPLATE = """\
This change permanently discards the following revisions:
"""
NO_DISCARDED_REVISIONS_TEMPLATE = """\
The revisions that were on this %(refname_type)s are still contained in
other references; therefore, this change does not discard any commits
from the repository.
"""
NEW_REVISIONS_TEMPLATE = """\
The %(tot)s revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "adds" were already present in the repository and have only
been added to this reference.
"""
TAG_CREATED_TEMPLATE = """\
at %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_UPDATED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was modified! ***
from %(oldrev_short)-9s (%(oldrev_type)s)
to %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_DELETED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was deleted! ***
"""
# The template used in summary tables. It looks best if this uses the
# same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE.
BRIEF_SUMMARY_TEMPLATE = """\
%(action)10s %(rev_short)-9s %(text)s
"""
NON_COMMIT_UPDATE_TEMPLATE = """\
This is an unusual reference change because the reference did not
refer to a commit either before or after the change. We do not know
how to provide full information about this reference change.
"""
REVISION_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Cc: %(cc_recipients)s
Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
From: %(fromaddr)s
Reply-To: %(reply_to)s
In-Reply-To: %(reply_to_msgid)s
References: %(reply_to_msgid)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
REVISION_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
# Combined, meaning refchange+revision email (for single-commit additions)
COMBINED_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
COMBINED_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE
class CommandError(Exception):
def __init__(self, cmd, retcode):
self.cmd = cmd
self.retcode = retcode
Exception.__init__(
self,
'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,)
)
class ConfigurationException(Exception):
pass
# The "git" program (this could be changed to include a full path):
GIT_EXECUTABLE = 'git'
# How "git" should be invoked (including global arguments), as a list
# of words. This variable is usually initialized automatically by
# read_git_output() via choose_git_command(), but if a value is set
# here then it will be used unconditionally.
GIT_CMD = None
def choose_git_command():
"""Decide how to invoke git, and record the choice in GIT_CMD."""
global GIT_CMD
if GIT_CMD is None:
try:
# Check to see whether the "-c" option is accepted (it was
# only added in Git 1.7.2). We don't actually use the
# output of "git --version", though if we needed more
# specific version information this would be the place to
# do it.
cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version']
read_output(cmd)
GIT_CMD = [GIT_EXECUTABLE, '-c', 'i18n.logoutputencoding=%s' % (ENCODING,)]
except CommandError:
GIT_CMD = [GIT_EXECUTABLE]
def read_git_output(args, input=None, keepends=False, **kw):
"""Read the output of a Git command."""
if GIT_CMD is None:
choose_git_command()
return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw)
def read_output(cmd, input=None, keepends=False, **kw):
if input:
stdin = subprocess.PIPE
else:
stdin = None
p = subprocess.Popen(
cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw
)
(out, err) = p.communicate(input)
retcode = p.wait()
if retcode:
raise CommandError(cmd, retcode)
if not keepends:
out = out.rstrip('\n\r')
return out
def read_git_lines(args, keepends=False, **kw):
"""Return the lines output by Git command.
Return as single lines, with newlines stripped off."""
return read_git_output(args, keepends=True, **kw).splitlines(keepends)
def git_rev_list_ish(cmd, spec, args=None, **kw):
"""Common functionality for invoking a 'git rev-list'-like command.
Parameters:
* cmd is the Git command to run, e.g., 'rev-list' or 'log'.
* spec is a list of revision arguments to pass to the named
command. If None, this function returns an empty list.
* args is a list of extra arguments passed to the named command.
* All other keyword arguments (if any) are passed to the
underlying read_git_lines() function.
Return the output of the Git command in the form of a list, one
entry per output line.
"""
if spec is None:
return []
if args is None:
args = []
args = [cmd, '--stdin'] + args
spec_stdin = ''.join(s + '\n' for s in spec)
return read_git_lines(args, input=spec_stdin, **kw)
def git_rev_list(spec, **kw):
"""Run 'git rev-list' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('rev-list', spec, **kw)
def git_log(spec, **kw):
"""Run 'git log' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('log', spec, **kw)
def header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field."""
try:
if isinstance(text, str):
text = text.decode(ENCODING, 'replace')
return Header(text, header_name=header_name).encode()
except UnicodeEncodeError:
return Header(text, header_name=header_name, charset=CHARSET,
errors='replace').encode()
def addr_header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field containing
email addresses."""
return Header(
', '.join(
formataddr((header_encode(name), emailaddr))
for name, emailaddr in getaddresses([text])
),
header_name=header_name
).encode()
class Config(object):
def __init__(self, section, git_config=None):
"""Represent a section of the git configuration.
If git_config is specified, it is passed to "git config" in
the GIT_CONFIG environment variable, meaning that "git config"
will read the specified path rather than the Git default
config paths."""
self.section = section
if git_config:
self.env = os.environ.copy()
self.env['GIT_CONFIG'] = git_config
else:
self.env = None
@staticmethod
def _split(s):
"""Split NUL-terminated values."""
words = s.split('\0')
assert words[-1] == ''
return words[:-1]
def get(self, name, default=None):
try:
values = self._split(read_git_output(
['config', '--get', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
assert len(values) == 1
return values[0]
except CommandError:
return default
def get_bool(self, name, default=None):
try:
value = read_git_output(
['config', '--get', '--bool', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError:
return default
return value == 'true'
def get_all(self, name, default=None):
"""Read a (possibly multivalued) setting from the configuration.
Return the result as a list of values, or default if the name
is unset."""
try:
return self._split(read_git_output(
['config', '--get-all', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
except CommandError, e:
if e.retcode == 1:
# "the section or key is invalid"; i.e., there is no
# value for the specified key.
return default
else:
raise
def get_recipients(self, name, default=None):
"""Read a recipients list from the configuration.
Return the result as a comma-separated list of email
addresses, or default if the option is unset. If the setting
has multiple values, concatenate them with comma separators."""
lines = self.get_all(name, default=None)
if lines is None:
return default
return ', '.join(line.strip() for line in lines)
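    # Illustrative sketch (section, key and addresses are hypothetical): for
    # a Config('multimailhook') instance and a repository configured with
    #
    #     git config --add multimailhook.mailinglist alice@example.com
    #     git config --add multimailhook.mailinglist bob@example.com
    #
    # get_recipients('mailinglist') returns
    # 'alice@example.com, bob@example.com', while get_all('mailinglist')
    # returns the two raw values as a list.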
def set(self, name, value):
read_git_output(
['config', '%s.%s' % (self.section, name), value],
env=self.env,
)
def add(self, name, value):
read_git_output(
['config', '--add', '%s.%s' % (self.section, name), value],
env=self.env,
)
def __contains__(self, name):
return self.get_all(name, default=None) is not None
# We don't use this method anymore internally, but keep it here in
# case somebody is calling it from their own code:
def has_key(self, name):
return name in self
def unset_all(self, name):
try:
read_git_output(
['config', '--unset-all', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError, e:
if e.retcode == 5:
# The name doesn't exist, which is what we wanted anyway...
pass
else:
raise
def set_recipients(self, name, value):
self.unset_all(name)
for pair in getaddresses([value]):
self.add(name, formataddr(pair))
def generate_summaries(*log_args):
"""Generate a brief summary for each revision requested.
log_args are strings that will be passed directly to "git log" as
revision selectors. Iterate over (sha1_short, subject) for each
commit specified by log_args (subject is the first line of the
commit message as a string without EOLs)."""
cmd = [
'log', '--abbrev', '--format=%h %s',
] + list(log_args) + ['--']
for line in read_git_lines(cmd):
yield tuple(line.split(' ', 1))
def limit_lines(lines, max_lines):
for (index, line) in enumerate(lines):
if index < max_lines:
yield line
    # If any lines were suppressed, note once at the end how many:
    if index >= max_lines:
        yield '... %d lines suppressed ...\n' % (index + 1 - max_lines,)
def limit_linelength(lines, max_linelength):
for line in lines:
# Don't forget that lines always include a trailing newline.
if len(line) > max_linelength + 1:
line = line[:max_linelength - 7] + ' [...]\n'
yield line
class CommitSet(object):
"""A (constant) set of object names.
The set should be initialized with full SHA1 object names. The
__contains__() method returns True iff its argument is an
    abbreviation of any of the names in the set."""
def __init__(self, names):
self._names = sorted(names)
def __len__(self):
return len(self._names)
def __contains__(self, sha1_abbrev):
"""Return True iff this set contains sha1_abbrev (which might be abbreviated)."""
i = bisect.bisect_left(self._names, sha1_abbrev)
return i < len(self) and self._names[i].startswith(sha1_abbrev)
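    # Illustrative sketch (SHA1s are hypothetical): because the names are
    # kept sorted, an abbreviated SHA1 is matched with a single bisect
    # lookup:
    #
    #     commits = CommitSet(['1111aaaa' + '0' * 32, '2222bbbb' + '0' * 32])
    #     '2222' in commits        # True  (prefix of a member)
    #     '3333' in commits        # False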
class GitObject(object):
def __init__(self, sha1, type=None):
if sha1 == ZEROS:
self.sha1 = self.type = self.commit_sha1 = None
else:
self.sha1 = sha1
self.type = type or read_git_output(['cat-file', '-t', self.sha1])
if self.type == 'commit':
self.commit_sha1 = self.sha1
elif self.type == 'tag':
try:
self.commit_sha1 = read_git_output(
['rev-parse', '--verify', '%s^0' % (self.sha1,)]
)
except CommandError:
# Cannot deref tag to determine commit_sha1
self.commit_sha1 = None
else:
self.commit_sha1 = None
self.short = read_git_output(['rev-parse', '--short', sha1])
def get_summary(self):
"""Return (sha1_short, subject) for this commit."""
if not self.sha1:
raise ValueError('Empty commit has no summary')
return iter(generate_summaries('--no-walk', self.sha1)).next()
def __eq__(self, other):
return isinstance(other, GitObject) and self.sha1 == other.sha1
def __hash__(self):
return hash(self.sha1)
def __nonzero__(self):
return bool(self.sha1)
def __str__(self):
return self.sha1 or ZEROS
class Change(object):
"""A Change that has been made to the Git repository.
Abstract class from which both Revisions and ReferenceChanges are
derived. A Change knows how to generate a notification email
describing itself."""
def __init__(self, environment):
self.environment = environment
self._values = None
def _compute_values(self):
"""Return a dictionary {keyword: expansion} for this Change.
Derived classes overload this method to add more entries to
the return value. This method is used internally by
get_values(). The return value should always be a new
dictionary."""
return self.environment.get_values()
def get_values(self, **extra_values):
"""Return a dictionary {keyword: expansion} for this Change.
Return a dictionary mapping keywords to the values that they
should be expanded to for this Change (used when interpolating
template strings). If any keyword arguments are supplied, add
those to the return value as well. The return value is always
a new dictionary."""
if self._values is None:
self._values = self._compute_values()
values = self._values.copy()
if extra_values:
values.update(extra_values)
return values
def expand(self, template, **extra_values):
"""Expand template.
Expand the template (which should be a string) using string
interpolation of the values for this Change. If any keyword
arguments are provided, also include those in the keywords
available for interpolation."""
return template % self.get_values(**extra_values)
def expand_lines(self, template, **extra_values):
"""Break template into lines and expand each line."""
values = self.get_values(**extra_values)
for line in template.splitlines(True):
yield line % values
def expand_header_lines(self, template, **extra_values):
"""Break template into lines and expand each line as an RFC 2822 header.
Encode values and split up lines that are too long. Silently
skip lines that contain references to unknown variables."""
values = self.get_values(**extra_values)
for line in template.splitlines():
(name, value) = line.split(':', 1)
try:
value = value % values
except KeyError, e:
if DEBUG:
self.environment.log_warning(
'Warning: unknown variable %r in the following line; line skipped:\n'
' %s\n'
% (e.args[0], line,)
)
else:
if name.lower() in ADDR_HEADERS:
value = addr_header_encode(value, name)
else:
value = header_encode(value, name)
for splitline in ('%s: %s\n' % (name, value)).splitlines(True):
yield splitline
def generate_email_header(self):
"""Generate the RFC 2822 email headers for this Change, a line at a time.
The output should not include the trailing blank line."""
raise NotImplementedError()
def generate_email_intro(self):
"""Generate the email intro for this Change, a line at a time.
The output will be used as the standard boilerplate at the top
of the email body."""
raise NotImplementedError()
def generate_email_body(self):
"""Generate the main part of the email body, a line at a time.
The text in the body might be truncated after a specified
number of lines (see multimailhook.emailmaxlines)."""
raise NotImplementedError()
def generate_email_footer(self):
"""Generate the footer of the email, a line at a time.
The footer is always included, irrespective of
multimailhook.emailmaxlines."""
raise NotImplementedError()
def generate_email(self, push, body_filter=None, extra_header_values={}):
"""Generate an email describing this change.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
    future (e.g. passing extra values to generate_email_intro())."""
for line in self.generate_email_header(**extra_header_values):
yield line
yield '\n'
for line in self.generate_email_intro():
yield line
body = self.generate_email_body(push)
if body_filter is not None:
body = body_filter(body)
for line in body:
yield line
for line in self.generate_email_footer():
yield line
class Revision(Change):
"""A Change consisting of a single git commit."""
CC_RE = re.compile(r'^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$')
def __init__(self, reference_change, rev, num, tot):
Change.__init__(self, reference_change.environment)
self.reference_change = reference_change
self.rev = rev
self.change_type = self.reference_change.change_type
self.refname = self.reference_change.refname
self.num = num
self.tot = tot
self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
self.recipients = self.environment.get_revision_recipients(self)
self.cc_recipients = ''
if self.environment.get_scancommitforcc():
self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
if self.cc_recipients:
self.environment.log_msg(
'Add %s to CC for %s\n' % (self.cc_recipients, self.rev.sha1))
def _cc_recipients(self):
cc_recipients = []
message = read_git_output(['log', '--no-walk', '--format=%b', self.rev.sha1])
lines = message.strip().split('\n')
for line in lines:
m = re.match(self.CC_RE, line)
if m:
cc_recipients.append(m.group('to'))
return cc_recipients
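    # Illustrative sketch: a commit-message line such as
    #
    #     CC: reviewer@example.com  # please take a look
    #
    # matches CC_RE above (the trailing '#...' comment is ignored), so
    # 'reviewer@example.com' is added to the CC list when
    # multimailhook.scancommitforcc is enabled.  The address shown is a
    # hypothetical example.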
def _compute_values(self):
values = Change._compute_values(self)
oneline = read_git_output(
['log', '--format=%s', '--no-walk', self.rev.sha1]
)
values['rev'] = self.rev.sha1
values['rev_short'] = self.rev.short
values['change_type'] = self.change_type
values['refname'] = self.refname
values['short_refname'] = self.reference_change.short_refname
values['refname_type'] = self.reference_change.refname_type
values['reply_to_msgid'] = self.reference_change.msgid
values['num'] = self.num
values['tot'] = self.tot
values['recipients'] = self.recipients
if self.cc_recipients:
values['cc_recipients'] = self.cc_recipients
values['oneline'] = oneline
values['author'] = self.author
reply_to = self.environment.get_reply_to_commit(self)
if reply_to:
values['reply_to'] = reply_to
return values
def generate_email_header(self, **extra_values):
for line in self.expand_header_lines(
REVISION_HEADER_TEMPLATE, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(REVISION_INTRO_TEMPLATE):
yield line
def generate_email_body(self, push):
"""Show this revision."""
return read_git_lines(
['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
keepends=True,
)
def generate_email_footer(self):
return self.expand_lines(REVISION_FOOTER_TEMPLATE)
class ReferenceChange(Change):
"""A Change to a Git reference.
An abstract class representing a create, update, or delete of a
Git reference. Derived classes handle specific types of reference
(e.g., tags vs. branches). These classes generate the main
reference change email summarizing the reference change and
    whether it caused any commits to be added or removed.
ReferenceChange objects are usually created using the static
create() method, which has the logic to decide which derived class
to instantiate."""
REF_RE = re.compile(r'^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$')
@staticmethod
def create(environment, oldrev, newrev, refname):
"""Return a ReferenceChange object representing the change.
Return an object that represents the type of change that is being
made. oldrev and newrev should be SHA1s or ZEROS."""
old = GitObject(oldrev)
new = GitObject(newrev)
rev = new or old
        # The revision type tells us what kind of object the ref points at;
        # combined with the location of the ref, this lets us decide between:
# - working branch
# - tracking branch
# - unannotated tag
# - annotated tag
m = ReferenceChange.REF_RE.match(refname)
if m:
area = m.group('area')
short_refname = m.group('shortname')
else:
area = ''
short_refname = refname
if rev.type == 'tag':
# Annotated tag:
klass = AnnotatedTagChange
elif rev.type == 'commit':
if area == 'tags':
# Non-annotated tag:
klass = NonAnnotatedTagChange
elif area == 'heads':
# Branch:
klass = BranchChange
elif area == 'remotes':
# Tracking branch:
environment.log_warning(
'*** Push-update of tracking branch %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Some other reference namespace:
environment.log_warning(
'*** Push-update of strange reference %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Anything else (is there anything else?)
environment.log_warning(
'*** Unknown type of update to %r (%s)\n'
'*** - incomplete email generated.\n'
% (refname, rev.type,)
)
klass = OtherReferenceChange
return klass(
environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
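    # Illustrative mapping for create() (ref names are hypothetical):
    #
    #     refs/heads/master       pointing at a commit -> BranchChange
    #     refs/tags/v1.0          pointing at a tag    -> AnnotatedTagChange
    #     refs/tags/v1.0          pointing at a commit -> NonAnnotatedTagChange
    #     refs/remotes/origin/foo pointing at a commit -> OtherReferenceChange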
def __init__(self, environment, refname, short_refname, old, new, rev):
Change.__init__(self, environment)
self.change_type = {
(False, True): 'create',
(True, True): 'update',
(True, False): 'delete',
}[bool(old), bool(new)]
self.refname = refname
self.short_refname = short_refname
self.old = old
self.new = new
self.rev = rev
self.msgid = make_msgid()
self.diffopts = environment.diffopts
self.graphopts = environment.graphopts
self.logopts = environment.logopts
self.commitlogopts = environment.commitlogopts
self.showgraph = environment.refchange_showgraph
self.showlog = environment.refchange_showlog
self.header_template = REFCHANGE_HEADER_TEMPLATE
self.intro_template = REFCHANGE_INTRO_TEMPLATE
self.footer_template = FOOTER_TEMPLATE
def _compute_values(self):
values = Change._compute_values(self)
values['change_type'] = self.change_type
values['refname_type'] = self.refname_type
values['refname'] = self.refname
values['short_refname'] = self.short_refname
values['msgid'] = self.msgid
values['recipients'] = self.recipients
values['oldrev'] = str(self.old)
values['oldrev_short'] = self.old.short
values['newrev'] = str(self.new)
values['newrev_short'] = self.new.short
if self.old:
values['oldrev_type'] = self.old.type
if self.new:
values['newrev_type'] = self.new.type
reply_to = self.environment.get_reply_to_refchange(self)
if reply_to:
values['reply_to'] = reply_to
return values
def send_single_combined_email(self, known_added_sha1s):
"""Determine if a combined refchange/revision email should be sent
If there is only a single new (non-merge) commit added by a
change, it is useful to combine the ReferenceChange and
Revision emails into one. In such a case, return the single
revision; otherwise, return None.
This method is overridden in BranchChange."""
return None
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
"""Generate an email describing this change AND specified revision.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
        future (e.g. passing extra values to generate_email_intro()).
This method is overridden in BranchChange."""
raise NotImplementedError
def get_subject(self):
template = {
'create': REF_CREATED_SUBJECT_TEMPLATE,
'update': REF_UPDATED_SUBJECT_TEMPLATE,
'delete': REF_DELETED_SUBJECT_TEMPLATE,
}[self.change_type]
return self.expand(template)
def generate_email_header(self, **extra_values):
if 'subject' not in extra_values:
extra_values['subject'] = self.get_subject()
for line in self.expand_header_lines(
self.header_template, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(self.intro_template):
yield line
def generate_email_body(self, push):
"""Call the appropriate body-generation routine.
Call one of generate_create_summary() /
generate_update_summary() / generate_delete_summary()."""
change_summary = {
'create': self.generate_create_summary,
'delete': self.generate_delete_summary,
'update': self.generate_update_summary,
}[self.change_type](push)
for line in change_summary:
yield line
for line in self.generate_revision_change_summary(push):
yield line
def generate_email_footer(self):
return self.expand_lines(self.footer_template)
def generate_revision_change_graph(self, push):
if self.showgraph:
args = ['--graph'] + self.graphopts
for newold in ('new', 'old'):
has_newold = False
spec = push.get_commits_spec(newold, self)
for line in git_log(spec, args=args, keepends=True):
if not has_newold:
has_newold = True
yield '\n'
yield 'Graph of %s commits:\n\n' % (
{'new': 'new', 'old': 'discarded'}[newold],)
yield ' ' + line
if has_newold:
yield '\n'
def generate_revision_change_log(self, new_commits_list):
if self.showlog:
yield '\n'
yield 'Detailed log of new commits:\n\n'
for line in read_git_lines(
['log', '--no-walk']
+ self.logopts
+ new_commits_list
+ ['--'],
keepends=True,
):
yield line
def generate_new_revision_summary(self, tot, new_commits_list, push):
for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
yield line
for line in self.generate_revision_change_graph(push):
yield line
for line in self.generate_revision_change_log(new_commits_list):
yield line
def generate_revision_change_summary(self, push):
"""Generate a summary of the revisions added/removed by this change."""
if self.new.commit_sha1 and not self.old.commit_sha1:
# A new reference was created. List the new revisions
# brought by the new reference (i.e., those revisions that
# were not in the repository before this reference
# change).
sha1s = list(push.get_new_commits(self))
sha1s.reverse()
tot = len(sha1s)
new_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if new_revisions:
yield self.expand('This %(refname_type)s includes the following new commits:\n')
yield '\n'
for r in new_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='new', text=subject,
)
yield '\n'
for line in self.generate_new_revision_summary(
tot, [r.rev.sha1 for r in new_revisions], push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
elif self.new.commit_sha1 and self.old.commit_sha1:
# A reference was changed to point at a different commit.
# List the revisions that were removed and/or added *from
# that reference* by this reference change, along with a
# diff between the trees for its old and new values.
# List of the revisions that were added to the branch by
# this update. Note this list can include revisions that
# have already had notification emails; we want such
# revisions in the summary even though we will not send
# new notification emails for them.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
# List of the revisions that were removed from the branch
# by this update. This will be empty except for
# non-fast-forward updates.
discards = list(generate_summaries(
'%s..%s' % (self.new.commit_sha1, self.old.commit_sha1,)
))
if adds:
new_commits_list = push.get_new_commits(self)
else:
new_commits_list = []
new_commits = CommitSet(new_commits_list)
if discards:
discarded_commits = CommitSet(push.get_discarded_commits(self))
else:
discarded_commits = CommitSet([])
if discards and adds:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(NON_FF_TEMPLATE):
yield line
elif discards:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(REWIND_ONLY_TEMPLATE):
yield line
elif adds:
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='from',
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
if new_commits:
for line in self.generate_new_revision_summary(
len(new_commits), new_commits_list, push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
for line in self.generate_revision_change_graph(push):
yield line
# The diffstat is shown from the old revision to the new
# revision. This is to show the truth of what happened in
# this change. There's no point showing the stat from the
# base to the new revision because the base is effectively a
# random revision at this point - the user will be interested
# in what this revision changed - including the undoing of
# previous revisions in the case of non-fast-forward updates.
yield '\n'
yield 'Summary of changes:\n'
for line in read_git_lines(
['diff-tree']
+ self.diffopts
+ ['%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,)],
keepends=True,
):
yield line
elif self.old.commit_sha1 and not self.new.commit_sha1:
# A reference was deleted. List the revisions that were
# removed from the repository by this reference change.
sha1s = list(push.get_discarded_commits(self))
tot = len(sha1s)
discarded_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if discarded_revisions:
for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE):
yield line
yield '\n'
for r in discarded_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='discards', text=subject,
)
for line in self.generate_revision_change_graph(push):
yield line
else:
for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE):
yield line
elif not self.old.commit_sha1 and not self.new.commit_sha1:
for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE):
yield line
def generate_create_summary(self, push):
"""Called for the creation of a reference."""
# This is a new reference and so oldrev is not valid
(sha1, subject) = self.new.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='at',
rev_short=sha1, text=subject,
)
yield '\n'
def generate_update_summary(self, push):
"""Called for the change of a pre-existing branch."""
return iter([])
def generate_delete_summary(self, push):
"""Called for the deletion of any type of reference."""
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='was',
rev_short=sha1, text=subject,
)
yield '\n'
class BranchChange(ReferenceChange):
refname_type = 'branch'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
self._single_revision = None
def send_single_combined_email(self, known_added_sha1s):
if not self.environment.combine_when_single_commit:
return None
        # In the sadly-all-too-frequent use case of people pushing only
# one of their commits at a time to a repository, users feel
# the reference change summary emails are noise rather than
# important signal. This is because, in this particular
        # use case, there is a reference change summary email for each
# new commit, and all these summaries do is point out that
# there is one new commit (which can readily be inferred by
# the existence of the individual revision email that is also
# sent). In such cases, our users prefer there to be a combined
# reference change summary/new revision email.
#
# So, if the change is an update and it doesn't discard any
# commits, and it adds exactly one non-merge commit (gerrit
# forces a workflow where every commit is individually merged
# and the git-multimail hook fired off for just this one
# change), then we send a combined refchange/revision email.
try:
# If this change is a reference update that doesn't discard
# any commits...
if self.change_type != 'update':
return None
if read_git_lines(
['merge-base', self.old.sha1, self.new.sha1]
) != [self.old.sha1]:
return None
# Check if this update introduced exactly one non-merge
# commit:
def split_line(line):
"""Split line into (sha1, [parent,...])."""
words = line.split()
return (words[0], words[1:])
# Get the new commits introduced by the push as a list of
# (sha1, [parent,...])
new_commits = [
split_line(line)
for line in read_git_lines(
[
'log', '-3', '--format=%H %P',
'%s..%s' % (self.old.sha1, self.new.sha1),
]
)
]
if not new_commits:
return None
# If the newest commit is a merge, save it for a later check
# but otherwise ignore it
merge = None
tot = len(new_commits)
if len(new_commits[0][1]) > 1:
merge = new_commits[0][0]
del new_commits[0]
# Our primary check: we can't combine if more than one commit
# is introduced. We also currently only combine if the new
# commit is a non-merge commit, though it may make sense to
# combine if it is a merge as well.
if not (
len(new_commits) == 1
and len(new_commits[0][1]) == 1
and new_commits[0][0] in known_added_sha1s
):
return None
# We do not want to combine revision and refchange emails if
# those go to separate locations.
rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
if rev.recipients != self.recipients:
return None
# We ignored the newest commit if it was just a merge of the one
# commit being introduced. But we don't want to ignore that
        # merge commit if it involved conflict resolutions. Check that.
if merge and merge != read_git_output(['diff-tree', '--cc', merge]):
return None
# We can combine the refchange and one new revision emails
# into one. Return the Revision that a combined email should
# be sent about.
return rev
except CommandError:
# Cannot determine number of commits in old..new or new..old;
# don't combine reference/revision emails:
return None
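    # Illustrative sketch of the check above (SHA1s are hypothetical): for a
    # fast-forward update old..new, 'git log -3 --format=%H %P old..new'
    # might print a single line
    #
    #     1111aaaa... 2222bbbb...
    #
    # i.e. exactly one new commit with a single parent.  Only then (and only
    # if that commit is in known_added_sha1s and its revision email goes to
    # the same recipients) is a combined refchange/revision email sent.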
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
values = revision.get_values()
if extra_header_values:
values.update(extra_header_values)
if 'subject' not in extra_header_values:
values['subject'] = self.expand(COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values)
self._single_revision = revision
self.header_template = COMBINED_HEADER_TEMPLATE
self.intro_template = COMBINED_INTRO_TEMPLATE
self.footer_template = COMBINED_FOOTER_TEMPLATE
for line in self.generate_email(push, body_filter, values):
yield line
def generate_email_body(self, push):
'''Call the appropriate body generation routine.
If this is a combined refchange/revision email, the special logic
for handling this combined email comes from this function. For
other cases, we just use the normal handling.'''
        # If self._single_revision isn't set, this is not a combined email;
        # fall back to the normal handling:
if not self._single_revision:
for line in super(BranchChange, self).generate_email_body(push):
yield line
return
# This is a combined refchange/revision email; we first provide
# some info from the refchange portion, and then call the revision
# generate_email_body function to handle the revision portion.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
yield self.expand("The following commit(s) were added to %(refname)s by this push:\n")
for (sha1, subject) in adds:
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='new',
rev_short=sha1, text=subject,
)
yield self._single_revision.rev.short + " is described below\n"
yield '\n'
for line in self._single_revision.generate_email_body(push):
yield line
class AnnotatedTagChange(ReferenceChange):
refname_type = 'annotated tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_announce_recipients(self)
self.show_shortlog = environment.announce_show_shortlog
ANNOTATED_TAG_FORMAT = (
'%(*objectname)\n'
'%(*objecttype)\n'
'%(taggername)\n'
'%(taggerdate)'
)
def describe_tag(self, push):
"""Describe the new value of an annotated tag."""
# Use git for-each-ref to pull out the individual fields from
# the tag
[tagobject, tagtype, tagger, tagged] = read_git_lines(
['for-each-ref', '--format=%s' % (self.ANNOTATED_TAG_FORMAT,), self.refname],
)
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='tagging',
rev_short=tagobject, text='(%s)' % (tagtype,),
)
if tagtype == 'commit':
# If the tagged object is a commit, then we assume this is a
# release, and so we calculate which tag this tag is
# replacing
try:
prevtag = read_git_output(['describe', '--abbrev=0', '%s^' % (self.new,)])
except CommandError:
prevtag = None
if prevtag:
yield ' replaces %s\n' % (prevtag,)
else:
prevtag = None
yield ' length %s bytes\n' % (read_git_output(['cat-file', '-s', tagobject]),)
yield ' tagged by %s\n' % (tagger,)
yield ' on %s\n' % (tagged,)
yield '\n'
# Show the content of the tag message; this might contain a
# change log or release notes so is worth displaying.
yield LOGBEGIN
contents = list(read_git_lines(['cat-file', 'tag', self.new.sha1], keepends=True))
contents = contents[contents.index('\n') + 1:]
if contents and contents[-1][-1:] != '\n':
contents.append('\n')
for line in contents:
yield line
if self.show_shortlog and tagtype == 'commit':
# Only commit tags make sense to have rev-list operations
# performed on them
yield '\n'
if prevtag:
# Show changes since the previous release
revlist = read_git_output(
['rev-list', '--pretty=short', '%s..%s' % (prevtag, self.new,)],
keepends=True,
)
else:
# No previous tag, show all the changes since time
# began
revlist = read_git_output(
['rev-list', '--pretty=short', '%s' % (self.new,)],
keepends=True,
)
for line in read_git_lines(['shortlog'], input=revlist, keepends=True):
yield line
yield LOGEND
yield '\n'
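    # Illustrative sketch: the for-each-ref call above emits four lines, one
    # per field of ANNOTATED_TAG_FORMAT (values below are hypothetical):
    #
    #     <sha1 of the tagged object>
    #     commit
    #     Jane Doe
    #     Fri Jan 2 12:00:00 2015 +0000
    #
    # which are unpacked into tagobject, tagtype, tagger and tagged above.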
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_update_summary(self, push):
"""Called for the update of an annotated tag.
This is probably a rare event and may not even be allowed."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
yield self.expand(' tag was %(oldrev_short)s\n')
yield '\n'
class NonAnnotatedTagChange(ReferenceChange):
refname_type = 'tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
def generate_update_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
for line in ReferenceChange.generate_delete_summary(self, push):
yield line
class OtherReferenceChange(ReferenceChange):
refname_type = 'reference'
def __init__(self, environment, refname, short_refname, old, new, rev):
# We use the full refname as short_refname, because otherwise
# the full name of the reference would not be obvious from the
# text of the email.
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
class Mailer(object):
"""An object that can send emails."""
def send(self, lines, to_addrs):
"""Send an email consisting of lines.
lines must be an iterable over the lines constituting the
header and body of the email. to_addrs is a list of recipient
addresses (can be needed even if lines already contains a
"To:" field). It can be either a string (comma-separated list
of email addresses) or a Python list of individual email
addresses.
"""
raise NotImplementedError()
class SendMailer(Mailer):
"""Send emails using 'sendmail -oi -t'."""
SENDMAIL_CANDIDATES = [
'/usr/sbin/sendmail',
'/usr/lib/sendmail',
]
@staticmethod
def find_sendmail():
for path in SendMailer.SENDMAIL_CANDIDATES:
if os.access(path, os.X_OK):
return path
else:
raise ConfigurationException(
'No sendmail executable found. '
'Try setting multimailhook.sendmailCommand.'
)
def __init__(self, command=None, envelopesender=None):
"""Construct a SendMailer instance.
command should be the command and arguments used to invoke
sendmail, as a list of strings. If an envelopesender is
provided, it will also be passed to the command, via '-f
envelopesender'."""
if command:
self.command = command[:]
else:
self.command = [self.find_sendmail(), '-oi', '-t']
if envelopesender:
self.command.extend(['-f', envelopesender])
def send(self, lines, to_addrs):
try:
p = subprocess.Popen(self.command, stdin=subprocess.PIPE)
except OSError, e:
sys.stderr.write(
'*** Cannot execute command: %s\n' % ' '.join(self.command)
+ '*** %s\n' % str(e)
+ '*** Try setting multimailhook.mailer to "smtp"\n'
'*** to send emails without using the sendmail command.\n'
)
sys.exit(1)
try:
p.stdin.writelines(lines)
except Exception, e:
sys.stderr.write(
'*** Error while generating commit email\n'
'*** - mail sending aborted.\n'
)
try:
# subprocess.terminate() is not available in Python 2.4
p.terminate()
except AttributeError:
pass
raise e
else:
p.stdin.close()
retcode = p.wait()
if retcode:
raise CommandError(self.command, retcode)
class SMTPMailer(Mailer):
"""Send emails using Python's smtplib."""
def __init__(self, envelopesender, smtpserver,
smtpservertimeout=10.0, smtpserverdebuglevel=0,
smtpencryption='none',
smtpuser='', smtppass='',
):
if not envelopesender:
sys.stderr.write(
'fatal: git_multimail: cannot use SMTPMailer without a sender address.\n'
'please set either multimailhook.envelopeSender or user.email\n'
)
sys.exit(1)
if smtpencryption == 'ssl' and not (smtpuser and smtppass):
raise ConfigurationException(
'Cannot use SMTPMailer with security option ssl '
'without options username and password.'
)
self.envelopesender = envelopesender
self.smtpserver = smtpserver
self.smtpservertimeout = smtpservertimeout
self.smtpserverdebuglevel = smtpserverdebuglevel
self.security = smtpencryption
self.username = smtpuser
self.password = smtppass
try:
def call(klass, server, timeout):
try:
return klass(server, timeout=timeout)
except TypeError:
# Old Python versions do not have timeout= argument.
return klass(server)
if self.security == 'none':
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'ssl':
self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'tls':
if ':' not in self.smtpserver:
self.smtpserver += ':587' # default port for TLS
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.ehlo()
else:
sys.stdout.write('*** Error: Control reached an invalid option. ***')
sys.exit(1)
if self.smtpserverdebuglevel > 0:
sys.stdout.write(
"*** Setting debug on for SMTP server connection (%s) ***\n"
% self.smtpserverdebuglevel)
self.smtp.set_debuglevel(self.smtpserverdebuglevel)
except Exception, e:
sys.stderr.write(
'*** Error establishing SMTP connection to %s ***\n'
% self.smtpserver)
sys.stderr.write('*** %s\n' % str(e))
sys.exit(1)
def __del__(self):
if hasattr(self, 'smtp'):
self.smtp.quit()
def send(self, lines, to_addrs):
try:
if self.username or self.password:
sys.stderr.write("*** Authenticating as %s ***\n" % self.username)
self.smtp.login(self.username, self.password)
msg = ''.join(lines)
# turn comma-separated list into Python list if needed.
if isinstance(to_addrs, basestring):
to_addrs = [email for (name, email) in getaddresses([to_addrs])]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
except Exception, e:
sys.stderr.write('*** Error sending email ***\n')
sys.stderr.write('*** %s\n' % str(e))
self.smtp.quit()
sys.exit(1)
class OutputMailer(Mailer):
"""Write emails to an output stream, bracketed by lines of '=' characters.
This is intended for debugging purposes."""
SEPARATOR = '=' * 75 + '\n'
def __init__(self, f):
self.f = f
def send(self, lines, to_addrs):
self.f.write(self.SEPARATOR)
self.f.writelines(lines)
self.f.write(self.SEPARATOR)
def get_git_dir():
"""Determine GIT_DIR.
Determine GIT_DIR either from the GIT_DIR environment variable or
from the working directory, using Git's usual rules."""
try:
return read_git_output(['rev-parse', '--git-dir'])
except CommandError:
sys.stderr.write('fatal: git_multimail: not in a git directory\n')
sys.exit(1)
class Environment(object):
"""Describes the environment in which the push is occurring.
An Environment object encapsulates information about the local
environment. For example, it knows how to determine:
* the name of the repository to which the push occurred
* what user did the push
* what users want to be informed about various types of changes.
An Environment object is expected to have the following methods:
get_repo_shortname()
Return a short name for the repository, for display
purposes.
get_repo_path()
Return the absolute path to the Git repository.
get_emailprefix()
Return a string that will be prefixed to every email's
subject.
get_pusher()
Return the username of the person who pushed the changes.
This value is used in the email body to indicate who
pushed the change.
get_pusher_email() (may return None)
Return the email address of the person who pushed the
changes. The value should be a single RFC 2822 email
address as a string; e.g., "Joe User <user@example.com>"
if available, otherwise "user@example.com". If set, the
value is used as the Reply-To address for refchange
emails. If it is impossible to determine the pusher's
email, this attribute should be set to None (in which case
no Reply-To header will be output).
get_sender()
Return the address to be used as the 'From' email address
in the email envelope.
get_fromaddr()
Return the 'From' email address used in the email 'From:'
headers. (May be a full RFC 2822 email address like 'Joe
User <user@example.com>'.)
get_administrator()
Return the name and/or email of the repository
administrator. This value is used in the footer as the
person to whom requests to be removed from the
notification list should be sent. Ideally, it should
include a valid email address.
get_reply_to_refchange()
get_reply_to_commit()
Return the address to use in the email "Reply-To" header,
as a string. These can be an RFC 2822 email address, or
None to omit the "Reply-To" header.
get_reply_to_refchange() is used for refchange emails;
get_reply_to_commit() is used for individual commit
emails.
They should also define the following attributes:
announce_show_shortlog (bool)
True iff announce emails should include a shortlog.
    refchange_showgraph (bool)
        True iff refchange emails should include a detailed graph.
    refchange_showlog (bool)
        True iff refchange emails should include a detailed log.
diffopts (list of strings)
The options that should be passed to 'git diff' for the
summary email. The value should be a list of strings
representing words to be passed to the command.
graphopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log --graph' when generating the detailed graph for
a set of commits (see refchange_showgraph)
logopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log' when generating the detailed log for a set of
commits (see refchange_showlog)
commitlogopts (list of strings)
The options that should be passed to 'git log' for each
commit mail. The value should be a list of strings
representing words to be passed to the command.
quiet (bool)
On success do not write to stderr
stdout (bool)
Write email to stdout rather than emailing. Useful for debugging
combine_when_single_commit (bool)
True if a combined email should be produced when a single
new commit is pushed to a branch, False otherwise.
"""
REPO_NAME_RE = re.compile(r'^(?P<name>.+?)(?:\.git)$')
def __init__(self, osenv=None):
self.osenv = osenv or os.environ
self.announce_show_shortlog = False
self.maxcommitemails = 500
self.diffopts = ['--stat', '--summary', '--find-copies-harder']
self.graphopts = ['--oneline', '--decorate']
self.logopts = []
self.refchange_showgraph = False
self.refchange_showlog = False
self.commitlogopts = ['-C', '--stat', '-p', '--cc']
self.quiet = False
self.stdout = False
self.combine_when_single_commit = True
self.COMPUTED_KEYS = [
'administrator',
'charset',
'emailprefix',
'fromaddr',
'pusher',
'pusher_email',
'repo_path',
'repo_shortname',
'sender',
]
self._values = None
def get_repo_shortname(self):
"""Use the last part of the repo path, with ".git" stripped off if present."""
basename = os.path.basename(os.path.abspath(self.get_repo_path()))
m = self.REPO_NAME_RE.match(basename)
if m:
return m.group('name')
else:
return basename
def get_pusher(self):
raise NotImplementedError()
def get_pusher_email(self):
return None
def get_fromaddr(self):
config = Config('user')
fromname = config.get('name', default='')
fromemail = config.get('email', default='')
if fromemail:
return formataddr([fromname, fromemail])
return self.get_sender()
def get_administrator(self):
return 'the administrator of this repository'
def get_emailprefix(self):
return ''
def get_repo_path(self):
if read_git_output(['rev-parse', '--is-bare-repository']) == 'true':
path = get_git_dir()
else:
path = read_git_output(['rev-parse', '--show-toplevel'])
return os.path.abspath(path)
def get_charset(self):
return CHARSET
def get_values(self):
"""Return a dictionary {keyword: expansion} for this Environment.
This method is called by Change._compute_values(). The keys
in the returned dictionary are available to be used in any of
the templates. The dictionary is created by calling
self.get_NAME() for each of the attributes named in
COMPUTED_KEYS and recording those that do not return None.
The return value is always a new dictionary."""
if self._values is None:
values = {}
for key in self.COMPUTED_KEYS:
value = getattr(self, 'get_%s' % (key,))()
if value is not None:
values[key] = value
self._values = values
return self._values.copy()
def get_refchange_recipients(self, refchange):
"""Return the recipients for notifications about refchange.
Return the list of email addresses to which notifications
about the specified ReferenceChange should be sent."""
raise NotImplementedError()
def get_announce_recipients(self, annotated_tag_change):
"""Return the recipients for notifications about annotated_tag_change.
Return the list of email addresses to which notifications
about the specified AnnotatedTagChange should be sent."""
raise NotImplementedError()
def get_reply_to_refchange(self, refchange):
return self.get_pusher_email()
def get_revision_recipients(self, revision):
"""Return the recipients for messages about revision.
Return the list of email addresses to which notifications
about the specified Revision should be sent. This method
could be overridden, for example, to take into account the
contents of the revision when deciding whom to notify about
it. For example, there could be a scheme for users to express
interest in particular files or subdirectories, and only
        receive notification emails for revisions affecting those
files."""
raise NotImplementedError()
def get_reply_to_commit(self, revision):
return revision.author
def filter_body(self, lines):
"""Filter the lines intended for an email body.
lines is an iterable over the lines that would go into the
email body. Filter it (e.g., limit the number of lines, the
line length, character set, etc.), returning another iterable.
See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin
for classes implementing this functionality."""
return lines
def log_msg(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_warning(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_error(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
class ConfigEnvironmentMixin(Environment):
"""A mixin that sets self.config to its constructor's config argument.
This class's constructor consumes the "config" argument.
Mixins that need to inspect the config should inherit from this
class (1) to make sure that "config" is still in the constructor
    arguments when its own constructor runs and/or (2) to be sure that
self.config is set after construction."""
def __init__(self, config, **kw):
super(ConfigEnvironmentMixin, self).__init__(**kw)
self.config = config
class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin):
"""An Environment that reads most of its information from "git config"."""
def __init__(self, config, **kw):
super(ConfigOptionsEnvironmentMixin, self).__init__(
config=config, **kw
)
for var, cfg in (
('announce_show_shortlog', 'announceshortlog'),
('refchange_showgraph', 'refchangeShowGraph'),
('refchange_showlog', 'refchangeshowlog'),
('quiet', 'quiet'),
('stdout', 'stdout'),
):
val = config.get_bool(cfg)
if val is not None:
setattr(self, var, val)
maxcommitemails = config.get('maxcommitemails')
if maxcommitemails is not None:
try:
self.maxcommitemails = int(maxcommitemails)
except ValueError:
self.log_warning(
'*** Malformed value for multimailhook.maxCommitEmails: %s\n' % maxcommitemails
+ '*** Expected a number. Ignoring.\n'
)
diffopts = config.get('diffopts')
if diffopts is not None:
self.diffopts = shlex.split(diffopts)
graphopts = config.get('graphOpts')
if graphopts is not None:
self.graphopts = shlex.split(graphopts)
logopts = config.get('logopts')
if logopts is not None:
self.logopts = shlex.split(logopts)
commitlogopts = config.get('commitlogopts')
if commitlogopts is not None:
self.commitlogopts = shlex.split(commitlogopts)
reply_to = config.get('replyTo')
self.__reply_to_refchange = config.get('replyToRefchange', default=reply_to)
if (
self.__reply_to_refchange is not None
and self.__reply_to_refchange.lower() == 'author'
):
raise ConfigurationException(
'"author" is not an allowed setting for replyToRefchange'
)
self.__reply_to_commit = config.get('replyToCommit', default=reply_to)
combine = config.get_bool('combineWhenSingleCommit')
if combine is not None:
self.combine_when_single_commit = combine
def get_administrator(self):
return (
self.config.get('administrator')
or self.get_sender()
or super(ConfigOptionsEnvironmentMixin, self).get_administrator()
)
def get_repo_shortname(self):
return (
self.config.get('reponame')
or super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname()
)
def get_emailprefix(self):
emailprefix = self.config.get('emailprefix')
if emailprefix is not None:
emailprefix = emailprefix.strip()
if emailprefix:
return emailprefix + ' '
else:
return ''
else:
return '[%s] ' % (self.get_repo_shortname(),)
def get_sender(self):
return self.config.get('envelopesender')
def get_fromaddr(self):
fromaddr = self.config.get('from')
if fromaddr:
return fromaddr
return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr()
def get_reply_to_refchange(self, refchange):
if self.__reply_to_refchange is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_refchange(refchange)
elif self.__reply_to_refchange.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_refchange.lower() == 'none':
return None
else:
return self.__reply_to_refchange
def get_reply_to_commit(self, revision):
if self.__reply_to_commit is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_commit(revision)
elif self.__reply_to_commit.lower() == 'author':
return revision.author
elif self.__reply_to_commit.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_commit.lower() == 'none':
return None
else:
return self.__reply_to_commit
def get_scancommitforcc(self):
return self.config.get('scancommitforcc')
class FilterLinesEnvironmentMixin(Environment):
"""Handle encoding and maximum line length of body lines.
emailmaxlinelength (int or None)
The maximum length of any single line in the email body.
Longer lines are truncated at that length with ' [...]'
appended.
strict_utf8 (bool)
If this field is set to True, then the email body text is
expected to be UTF-8. Any invalid characters are
converted to U+FFFD, the Unicode replacement character
(encoded as UTF-8, of course).
"""
def __init__(self, strict_utf8=True, emailmaxlinelength=500, **kw):
super(FilterLinesEnvironmentMixin, self).__init__(**kw)
self.__strict_utf8 = strict_utf8
self.__emailmaxlinelength = emailmaxlinelength
def filter_body(self, lines):
lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines)
if self.__strict_utf8:
lines = (line.decode(ENCODING, 'replace') for line in lines)
# Limit the line length in Unicode-space to avoid
# splitting characters:
if self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
lines = (line.encode(ENCODING, 'replace') for line in lines)
elif self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
return lines
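    # Illustrative effect (line contents are hypothetical): with the default
    # emailmaxlinelength of 500, a 600-character body line is replaced by its
    # first 493 characters followed by ' [...]' and a newline (see
    # limit_linelength() above).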
class ConfigFilterLinesEnvironmentMixin(
ConfigEnvironmentMixin,
FilterLinesEnvironmentMixin,
):
"""Handle encoding and maximum line length based on config."""
def __init__(self, config, **kw):
strict_utf8 = config.get_bool('emailstrictutf8', default=None)
if strict_utf8 is not None:
kw['strict_utf8'] = strict_utf8
emailmaxlinelength = config.get('emailmaxlinelength')
if emailmaxlinelength is not None:
kw['emailmaxlinelength'] = int(emailmaxlinelength)
super(ConfigFilterLinesEnvironmentMixin, self).__init__(
config=config, **kw
)
class MaxlinesEnvironmentMixin(Environment):
"""Limit the email body to a specified number of lines."""
def __init__(self, emailmaxlines, **kw):
super(MaxlinesEnvironmentMixin, self).__init__(**kw)
self.__emailmaxlines = emailmaxlines
def filter_body(self, lines):
lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines)
if self.__emailmaxlines:
lines = limit_lines(lines, self.__emailmaxlines)
return lines
class ConfigMaxlinesEnvironmentMixin(
ConfigEnvironmentMixin,
MaxlinesEnvironmentMixin,
):
"""Limit the email body to the number of lines specified in config."""
def __init__(self, config, **kw):
emailmaxlines = int(config.get('emailmaxlines', default='0'))
super(ConfigMaxlinesEnvironmentMixin, self).__init__(
config=config,
emailmaxlines=emailmaxlines,
**kw
)
class FQDNEnvironmentMixin(Environment):
"""A mixin that sets the host's FQDN to its constructor argument."""
def __init__(self, fqdn, **kw):
super(FQDNEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['fqdn']
self.__fqdn = fqdn
def get_fqdn(self):
"""Return the fully-qualified domain name for this host.
Return None if it is unavailable or unwanted."""
return self.__fqdn
class ConfigFQDNEnvironmentMixin(
ConfigEnvironmentMixin,
FQDNEnvironmentMixin,
):
"""Read the FQDN from the config."""
def __init__(self, config, **kw):
fqdn = config.get('fqdn')
super(ConfigFQDNEnvironmentMixin, self).__init__(
config=config,
fqdn=fqdn,
**kw
)
class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin):
"""Get the FQDN by calling socket.getfqdn()."""
def __init__(self, **kw):
super(ComputeFQDNEnvironmentMixin, self).__init__(
fqdn=socket.getfqdn(),
**kw
)
class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin):
"""Deduce pusher_email from pusher by appending an emaildomain."""
def __init__(self, **kw):
super(PusherDomainEnvironmentMixin, self).__init__(**kw)
self.__emaildomain = self.config.get('emaildomain')
def get_pusher_email(self):
if self.__emaildomain:
# Derive the pusher's full email address in the default way:
return '%s@%s' % (self.get_pusher(), self.__emaildomain)
else:
return super(PusherDomainEnvironmentMixin, self).get_pusher_email()
class StaticRecipientsEnvironmentMixin(Environment):
"""Set recipients statically based on constructor parameters."""
def __init__(
self,
refchange_recipients, announce_recipients, revision_recipients, scancommitforcc,
**kw
):
super(StaticRecipientsEnvironmentMixin, self).__init__(**kw)
# The recipients for various types of notification emails, as
# RFC 2822 email addresses separated by commas (or the empty
# string if no recipients are configured). Although there is
        # a mechanism to choose the recipient lists based on the
# actual *contents* of the change being reported, we only
# choose based on the *type* of the change. Therefore we can
# compute them once and for all:
if not (refchange_recipients
or announce_recipients
or revision_recipients
or scancommitforcc):
raise ConfigurationException('No email recipients configured!')
self.__refchange_recipients = refchange_recipients
self.__announce_recipients = announce_recipients
self.__revision_recipients = revision_recipients
def get_refchange_recipients(self, refchange):
return self.__refchange_recipients
def get_announce_recipients(self, annotated_tag_change):
return self.__announce_recipients
def get_revision_recipients(self, revision):
return self.__revision_recipients
class ConfigRecipientsEnvironmentMixin(
ConfigEnvironmentMixin,
StaticRecipientsEnvironmentMixin
):
"""Determine recipients statically based on config."""
def __init__(self, config, **kw):
super(ConfigRecipientsEnvironmentMixin, self).__init__(
config=config,
refchange_recipients=self._get_recipients(
config, 'refchangelist', 'mailinglist',
),
announce_recipients=self._get_recipients(
config, 'announcelist', 'refchangelist', 'mailinglist',
),
revision_recipients=self._get_recipients(
config, 'commitlist', 'mailinglist',
),
scancommitforcc=config.get('scancommitforcc'),
**kw
)
def _get_recipients(self, config, *names):
"""Return the recipients for a particular type of message.
Return the list of email addresses to which a particular type
of notification email should be sent, by looking at the config
value for "multimailhook.$name" for each of names. Use the
value from the first name that is configured. The return
        value is a (possibly empty) string containing RFC 2822 email
        addresses separated by commas. If none of the names is
        configured, return the empty string."""
for name in names:
retval = config.get_recipients(name)
if retval is not None:
return retval
else:
return ''
class ProjectdescEnvironmentMixin(Environment):
"""Make a "projectdesc" value available for templates.
By default, it is set to the first line of $GIT_DIR/description
(if that file is present and appears to be set meaningfully)."""
def __init__(self, **kw):
super(ProjectdescEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['projectdesc']
def get_projectdesc(self):
"""Return a one-line descripition of the project."""
git_dir = get_git_dir()
try:
projectdesc = open(os.path.join(git_dir, 'description')).readline().strip()
if projectdesc and not projectdesc.startswith('Unnamed repository'):
return projectdesc
except IOError:
pass
return 'UNNAMED PROJECT'
class GenericEnvironmentMixin(Environment):
def get_pusher(self):
return self.osenv.get('USER', self.osenv.get('USERNAME', 'unknown user'))
class GenericEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GenericEnvironmentMixin,
Environment,
):
pass
class GitoliteEnvironmentMixin(Environment):
def get_repo_shortname(self):
# The gitolite environment variable $GL_REPO is a pretty good
# repo_shortname (though it's probably not as good as a value
# the user might have explicitly put in his config).
return (
self.osenv.get('GL_REPO', None)
or super(GitoliteEnvironmentMixin, self).get_repo_shortname()
)
def get_pusher(self):
return self.osenv.get('GL_USER', 'unknown user')
def get_fromaddr(self):
GL_USER = self.osenv.get('GL_USER')
if GL_USER is not None:
# Find the path to gitolite.conf. Note that gitolite v3
# did away with the GL_ADMINDIR and GL_CONF environment
# variables (they are now hard-coded).
GL_ADMINDIR = self.osenv.get(
'GL_ADMINDIR',
os.path.expanduser(os.path.join('~', '.gitolite')))
GL_CONF = self.osenv.get(
'GL_CONF',
os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf'))
if os.path.isfile(GL_CONF):
f = open(GL_CONF, 'rU')
try:
in_user_emails_section = False
re_template = r'^\s*#\s*{}\s*$'
re_begin, re_user, re_end = (
re.compile(re_template.format(x))
for x in (
r'BEGIN\s+USER\s+EMAILS',
re.escape(GL_USER) + r'\s+(.*)',
r'END\s+USER\s+EMAILS',
))
for l in f:
l = l.rstrip('\n')
if not in_user_emails_section:
if re_begin.match(l):
in_user_emails_section = True
continue
if re_end.match(l):
break
m = re_user.match(l)
if m:
return m.group(1)
finally:
f.close()
return super(GitoliteEnvironmentMixin, self).get_fromaddr()
class IncrementalDateTime(object):
"""Simple wrapper to give incremental date/times.
Each call will result in a date/time a second later than the
previous call. This can be used to falsify email headers, to
increase the likelihood that email clients sort the emails
correctly."""
def __init__(self):
self.time = time.time()
def next(self):
formatted = formatdate(self.time, True)
self.time += 1
return formatted
class GitoliteEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GitoliteEnvironmentMixin,
Environment,
):
pass
class Push(object):
"""Represent an entire push (i.e., a group of ReferenceChanges).
It is easy to figure out what commits were added to a *branch* by
a Reference change:
git rev-list change.old..change.new
or removed from a *branch*:
git rev-list change.new..change.old
But it is not quite so trivial to determine which entirely new
commits were added to the *repository* by a push and which old
commits were discarded by a push. A big part of the job of this
class is to figure out these things, and to make sure that new
commits are only detailed once even if they were added to multiple
references.
The first step is to determine the "other" references--those
unaffected by the current push. They are computed by listing all
references then removing any affected by this push. The results
are stored in Push._other_ref_sha1s.
The commits contained in the repository before this push were
git rev-list other1 other2 other3 ... change1.old change2.old ...
Where "changeN.old" is the old value of one of the references
affected by this push.
The commits contained in the repository after this push are
git rev-list other1 other2 other3 ... change1.new change2.new ...
The commits added by this push are the difference between these
two sets, which can be written
git rev-list \
^other1 ^other2 ... \
^change1.old ^change2.old ... \
change1.new change2.new ...
The commits removed by this push can be computed by
git rev-list \
^other1 ^other2 ... \
^change1.new ^change2.new ... \
change1.old change2.old ...
The last point is that it is possible that other pushes are
occurring simultaneously to this one, so reference values can
change at any time. It is impossible to eliminate all race
conditions, but we reduce the window of time during which problems
can occur by translating reference names to SHA1s as soon as
possible and working with SHA1s thereafter (because SHA1s are
immutable)."""
# A map {(changeclass, changetype): integer} specifying the order
# that reference changes will be processed if multiple reference
# changes are included in a single push. The order is significant
# mostly because new commit notifications are threaded together
# with the first reference change that includes the commit. The
# following order thus causes commits to be grouped with branch
# changes (as opposed to tag changes) if possible.
SORT_ORDER = dict(
(value, i) for (i, value) in enumerate([
(BranchChange, 'update'),
(BranchChange, 'create'),
(AnnotatedTagChange, 'update'),
(AnnotatedTagChange, 'create'),
(NonAnnotatedTagChange, 'update'),
(NonAnnotatedTagChange, 'create'),
(BranchChange, 'delete'),
(AnnotatedTagChange, 'delete'),
(NonAnnotatedTagChange, 'delete'),
(OtherReferenceChange, 'update'),
(OtherReferenceChange, 'create'),
(OtherReferenceChange, 'delete'),
])
)
def __init__(self, changes, ignore_other_refs=False):
self.changes = sorted(changes, key=self._sort_key)
self.__other_ref_sha1s = None
self.__cached_commits_spec = {}
if ignore_other_refs:
self.__other_ref_sha1s = set()
@classmethod
def _sort_key(klass, change):
return (klass.SORT_ORDER[change.__class__, change.change_type], change.refname,)
@property
def _other_ref_sha1s(self):
"""The GitObjects referred to by references unaffected by this push.
"""
if self.__other_ref_sha1s is None:
# The refnames being changed by this push:
updated_refs = set(
change.refname
for change in self.changes
)
# The SHA-1s of commits referred to by all references in this
# repository *except* updated_refs:
sha1s = set()
fmt = (
'%(objectname) %(objecttype) %(refname)\n'
'%(*objectname) %(*objecttype) %(refname)'
)
for line in read_git_lines(
['for-each-ref', '--format=%s' % (fmt,)]):
(sha1, type, name) = line.split(' ', 2)
if sha1 and type == 'commit' and name not in updated_refs:
sha1s.add(sha1)
self.__other_ref_sha1s = sha1s
return self.__other_ref_sha1s
def _get_commits_spec_incl(self, new_or_old, reference_change=None):
"""Get new or old SHA-1 from one or each of the changed refs.
Return a list of SHA-1 commit identifier strings suitable as
arguments to 'git rev-list' (or 'git log' or ...). The
returned identifiers are either the old or new values from one
or all of the changed references, depending on the values of
new_or_old and reference_change.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned SHA-1 identifiers are the new values from
each changed reference. If 'old', the SHA-1 identifiers are
the old values from each changed reference.
If reference_change is specified and not None, only the new or
old reference from the specified reference is included in the
return value.
This function returns None if there are no matching revisions
(e.g., because a branch was deleted and new_or_old is 'new').
"""
if not reference_change:
incl_spec = sorted(
getattr(change, new_or_old).sha1
for change in self.changes
if getattr(change, new_or_old)
)
if not incl_spec:
incl_spec = None
elif not getattr(reference_change, new_or_old).commit_sha1:
incl_spec = None
else:
incl_spec = [getattr(reference_change, new_or_old).commit_sha1]
return incl_spec
def _get_commits_spec_excl(self, new_or_old):
"""Get exclusion revisions for determining new or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that will exclude all
commits that, depending on the value of new_or_old, were
either previously in the repository (useful for determining
which commits are new to the repository) or currently in the
repository (useful for determining which commits were
discarded from the repository).
new_or_old is either the string 'new' or the string 'old'. If
'new', the commits to be excluded are those that were in the
repository before the push. If 'old', the commits to be
excluded are those that are currently in the repository. """
old_or_new = {'old': 'new', 'new': 'old'}[new_or_old]
excl_revs = self._other_ref_sha1s.union(
getattr(change, old_or_new).sha1
for change in self.changes
if getattr(change, old_or_new).type in ['commit', 'tag']
)
return ['^' + sha1 for sha1 in sorted(excl_revs)]
def get_commits_spec(self, new_or_old, reference_change=None):
"""Get rev-list arguments for added or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that select those commits
that, depending on the value of new_or_old, are either new to
the repository or were discarded from the repository.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned list is used to select commits that are
new to the repository. If 'old', the returned value is used
to select the commits that have been discarded from the
repository.
If reference_change is specified and not None, the new or
discarded commits are limited to those that are reachable from
the new or old value of the specified reference.
This function returns None if there are no added (or discarded)
revisions.
"""
key = (new_or_old, reference_change)
if key not in self.__cached_commits_spec:
ret = self._get_commits_spec_incl(new_or_old, reference_change)
if ret is not None:
ret.extend(self._get_commits_spec_excl(new_or_old))
self.__cached_commits_spec[key] = ret
return self.__cached_commits_spec[key]
def get_new_commits(self, reference_change=None):
"""Return a list of commits added by this push.
Return a list of the object names of commits that were added
by the part of this push represented by reference_change. If
reference_change is None, then return a list of *all* commits
added by this push."""
spec = self.get_commits_spec('new', reference_change)
return git_rev_list(spec)
def get_discarded_commits(self, reference_change):
"""Return a list of commits discarded by this push.
Return a list of the object names of commits that were
entirely discarded from the repository by the part of this
push represented by reference_change."""
spec = self.get_commits_spec('old', reference_change)
return git_rev_list(spec)
def send_emails(self, mailer, body_filter=None):
"""Use send all of the notification emails needed for this push.
Use send all of the notification emails (including reference
change emails and commit emails) needed for this push. Send
the emails using mailer. If body_filter is not None, then use
it to filter the lines that are intended for the email
body."""
# The sha1s of commits that were introduced by this push.
# They will be removed from this set as they are processed, to
# guarantee that one (and only one) email is generated for
# each new commit.
unhandled_sha1s = set(self.get_new_commits())
send_date = IncrementalDateTime()
for change in self.changes:
sha1s = []
for sha1 in reversed(list(self.get_new_commits(change))):
if sha1 in unhandled_sha1s:
sha1s.append(sha1)
unhandled_sha1s.remove(sha1)
# Check if we've got anyone to send to
if not change.recipients:
change.environment.log_warning(
'*** no recipients configured so no email will be sent\n'
'*** for %r update %s->%s\n'
% (change.refname, change.old.sha1, change.new.sha1,)
)
else:
if not change.environment.quiet:
change.environment.log_msg(
'Sending notification emails to: %s\n' % (change.recipients,))
extra_values = {'send_date': send_date.next()}
rev = change.send_single_combined_email(sha1s)
if rev:
mailer.send(
change.generate_combined_email(self, rev, body_filter, extra_values),
rev.recipients,
)
# This change is now fully handled; no need to handle
# individual revisions any further.
continue
else:
mailer.send(
change.generate_email(self, body_filter, extra_values),
change.recipients,
)
max_emails = change.environment.maxcommitemails
if max_emails and len(sha1s) > max_emails:
change.environment.log_warning(
'*** Too many new commits (%d), not sending commit emails.\n' % len(sha1s)
+ '*** Try setting multimailhook.maxCommitEmails to a greater value\n'
+ '*** Currently, multimailhook.maxCommitEmails=%d\n' % max_emails
)
return
for (num, sha1) in enumerate(sha1s):
rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s))
if not rev.recipients and rev.cc_recipients:
change.environment.log_msg('*** Replacing Cc: with To:\n')
rev.recipients = rev.cc_recipients
rev.cc_recipients = None
if rev.recipients:
extra_values = {'send_date': send_date.next()}
mailer.send(
rev.generate_email(self, body_filter, extra_values),
rev.recipients,
)
# Consistency check:
if unhandled_sha1s:
change.environment.log_error(
'ERROR: No emails were sent for the following new commits:\n'
' %s\n'
% ('\n '.join(sorted(unhandled_sha1s)),)
)
def run_as_post_receive_hook(environment, mailer):
changes = []
for line in sys.stdin:
(oldrev, newrev, refname) = line.strip().split(' ', 2)
changes.append(
ReferenceChange.create(environment, oldrev, newrev, refname)
)
push = Push(changes)
push.send_emails(mailer, body_filter=environment.filter_body)
def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
changes = [
ReferenceChange.create(
environment,
read_git_output(['rev-parse', '--verify', oldrev]),
read_git_output(['rev-parse', '--verify', newrev]),
refname,
),
]
push = Push(changes, force_send)
push.send_emails(mailer, body_filter=environment.filter_body)
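# Illustrative note on how these entry points are driven by git: an update
# hook is invoked once per ref with three arguments, e.g.
#   hooks/update refs/heads/master <old-sha1> <new-sha1>
# while a post-receive hook receives the same "<old> <new> <refname>" triplets
# on stdin, one line per updated ref, which run_as_post_receive_hook() parses
# above.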
def choose_mailer(config, environment):
mailer = config.get('mailer', default='sendmail')
if mailer == 'smtp':
smtpserver = config.get('smtpserver', default='localhost')
smtpservertimeout = float(config.get('smtpservertimeout', default=10.0))
smtpserverdebuglevel = int(config.get('smtpserverdebuglevel', default=0))
smtpencryption = config.get('smtpencryption', default='none')
smtpuser = config.get('smtpuser', default='')
smtppass = config.get('smtppass', default='')
mailer = SMTPMailer(
envelopesender=(environment.get_sender() or environment.get_fromaddr()),
smtpserver=smtpserver, smtpservertimeout=smtpservertimeout,
smtpserverdebuglevel=smtpserverdebuglevel,
smtpencryption=smtpencryption,
smtpuser=smtpuser,
smtppass=smtppass,
)
elif mailer == 'sendmail':
command = config.get('sendmailcommand')
if command:
command = shlex.split(command)
mailer = SendMailer(command=command, envelopesender=environment.get_sender())
else:
environment.log_error(
'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n' % mailer
+ 'please use one of "smtp" or "sendmail".\n'
)
sys.exit(1)
return mailer
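# Illustrative configuration for the mailer selection above (values are
# placeholders):
#   git config multimailhook.mailer smtp
#   git config multimailhook.smtpserver smtp.example.com
# or, for the default sendmail path,
#   git config multimailhook.mailer sendmail
#   git config multimailhook.sendmailcommand "/usr/sbin/sendmail -oi -t"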
KNOWN_ENVIRONMENTS = {
'generic': GenericEnvironmentMixin,
'gitolite': GitoliteEnvironmentMixin,
}
def choose_environment(config, osenv=None, env=None, recipients=None):
if not osenv:
osenv = os.environ
environment_mixins = [
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
]
environment_kw = {
'osenv': osenv,
'config': config,
}
if not env:
env = config.get('environment')
if not env:
if 'GL_USER' in osenv and 'GL_REPO' in osenv:
env = 'gitolite'
else:
env = 'generic'
environment_mixins.append(KNOWN_ENVIRONMENTS[env])
if recipients:
environment_mixins.insert(0, StaticRecipientsEnvironmentMixin)
environment_kw['refchange_recipients'] = recipients
environment_kw['announce_recipients'] = recipients
environment_kw['revision_recipients'] = recipients
environment_kw['scancommitforcc'] = config.get('scancommitforcc')
else:
environment_mixins.insert(0, ConfigRecipientsEnvironmentMixin)
environment_klass = type(
'EffectiveEnvironment',
tuple(environment_mixins) + (Environment,),
{},
)
return environment_klass(**environment_kw)
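# For illustration: the type() call above composes a one-off environment class
# whose bases run from the recipients mixin (inserted at position 0, so it
# takes MRO precedence) through the fixed mixins down to Environment. With
# --recipients given on the command line, StaticRecipientsEnvironmentMixin is
# used and all message types share one recipient list; otherwise
# ConfigRecipientsEnvironmentMixin reads the per-type lists from git config.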
def main(args):
parser = optparse.OptionParser(
description=__doc__,
usage='%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV',
)
parser.add_option(
'--environment', '--env', action='store', type='choice',
choices=['generic', 'gitolite'], default=None,
help=(
'Choose the type of environment in use. Default is taken from '
'multimailhook.environment if set; otherwise "generic".'
),
)
parser.add_option(
'--stdout', action='store_true', default=False,
help='Output emails to stdout rather than sending them.',
)
parser.add_option(
'--recipients', action='store', default=None,
help='Set list of email recipients for all types of emails.',
)
parser.add_option(
'--show-env', action='store_true', default=False,
help=(
'Write to stderr the values determined for the environment '
'(intended for debugging purposes).'
),
)
parser.add_option(
'--force-send', action='store_true', default=False,
help=(
'Force sending refchange email when using as an update hook. '
'This is useful to work around the unreliable new commits '
'detection in this mode.'
),
)
(options, args) = parser.parse_args(args)
config = Config('multimailhook')
try:
environment = choose_environment(
config, osenv=os.environ,
env=options.environment,
recipients=options.recipients,
)
if options.show_env:
sys.stderr.write('Environment values:\n')
for (k, v) in sorted(environment.get_values().items()):
sys.stderr.write(' %s : %r\n' % (k, v))
sys.stderr.write('\n')
if options.stdout or environment.stdout:
mailer = OutputMailer(sys.stdout)
else:
mailer = choose_mailer(config, environment)
# Dual mode: if arguments were specified on the command line, run
# like an update hook; otherwise, run as a post-receive hook.
if args:
if len(args) != 3:
parser.error('Need zero or three non-option arguments')
(refname, oldrev, newrev) = args
run_as_update_hook(environment, mailer, refname, oldrev, newrev, options.force_send)
else:
run_as_post_receive_hook(environment, mailer)
except ConfigurationException, e:
sys.exit(str(e))
if __name__ == '__main__':
main(sys.argv[1:])
|
gpl-2.0
|
qiankunshe/sky_engine
|
tools/clang/scripts/package.py
|
20
|
9414
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will check out llvm and clang, and then package the results up
to a tgz file."""
import argparse
import fnmatch
import itertools
import os
import shutil
import subprocess
import sys
import tarfile
# Path constants.
THIS_DIR = os.path.dirname(__file__)
THIRD_PARTY_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
def Tee(output, logfile):
logfile.write(output)
print output,
def TeeCmd(cmd, logfile, fail_hard=True):
"""Runs cmd and writes the output to both stdout and logfile."""
# Reading from PIPE can deadlock if one buffer is full but we wait on a
# different one. To work around this, pipe the subprocess's stderr to
# its stdout buffer and don't give it a stdin.
# shell=True is required in cmd.exe since depot_tools has an svn.bat, and
# bat files only work with shell=True set.
proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
stdin=open(os.devnull), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline,''):
Tee(line, logfile)
if proc.poll() is not None:
break
exit_code = proc.wait()
if exit_code != 0 and fail_hard:
print 'Failed:', cmd
sys.exit(1)
def PrintTarProgress(tarinfo):
print 'Adding', tarinfo.name
return tarinfo
def main():
parser = argparse.ArgumentParser(description='build and package clang')
parser.add_argument('--gcc-toolchain',
help="the prefix for the GCC version used for building. "
"For /opt/foo/bin/gcc, pass "
"'--gcc-toolchain '/opt/foo'")
args = parser.parse_args()
with open('buildlog.txt', 'w') as log:
Tee('Diff in llvm:\n', log)
TeeCmd(['svn', 'stat', LLVM_DIR], log, fail_hard=False)
TeeCmd(['svn', 'diff', LLVM_DIR], log, fail_hard=False)
Tee('Diff in llvm/tools/clang:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
# TODO(thakis): compiler-rt is in projects/compiler-rt on Windows but
# llvm/compiler-rt elsewhere. So this diff call is currently only right on
# Windows.
Tee('Diff in llvm/compiler-rt:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxx:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxxabi:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxxabi')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxxabi')],
log, fail_hard=False)
Tee('Starting build\n', log)
# Do a clobber build.
shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)
build_cmd = [sys.executable, os.path.join(THIS_DIR, 'update.py'),
'--bootstrap', '--force-local-build', '--run-tests',
'--no-stdin-hack']
if args.gcc_toolchain is not None:
build_cmd.extend(['--gcc-toolchain', args.gcc_toolchain])
TeeCmd(build_cmd, log)
stamp = open(STAMP_FILE).read().rstrip()
pdir = 'clang-' + stamp
print pdir
shutil.rmtree(pdir, ignore_errors=True)
# Copy a whitelist of files to the directory we're going to tar up.
# This supports the same patterns that the fnmatch module understands.
exe_ext = '.exe' if sys.platform == 'win32' else ''
want = ['bin/llvm-symbolizer' + exe_ext,
'lib/clang/*/asan_blacklist.txt',
# Copy built-in headers (lib/clang/3.x.y/include).
'lib/clang/*/include/*',
]
if sys.platform == 'win32':
want.append('bin/clang-cl.exe')
else:
so_ext = 'dylib' if sys.platform == 'darwin' else 'so'
want.extend(['bin/clang',
'lib/libFindBadConstructs.' + so_ext,
'lib/libBlinkGCPlugin.' + so_ext,
])
if sys.platform == 'darwin':
want.extend(['bin/libc++.1.dylib',
# Copy only the OSX (ASan and profile) and iossim (ASan)
# runtime libraries:
'lib/clang/*/lib/darwin/*asan_osx*',
'lib/clang/*/lib/darwin/*asan_iossim*',
'lib/clang/*/lib/darwin/*profile_osx*',
])
elif sys.platform.startswith('linux'):
# Copy only
# lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
# but not dfsan.
want.extend(['lib/clang/*/lib/linux/*[atm]san*',
'lib/clang/*/lib/linux/*ubsan*',
'lib/clang/*/lib/linux/*libclang_rt.san*',
'lib/clang/*/lib/linux/*profile*',
'lib/clang/*/msan_blacklist.txt',
])
elif sys.platform == 'win32':
want.extend(['lib/clang/*/lib/windows/clang_rt.asan*.dll',
'lib/clang/*/lib/windows/clang_rt.asan*.lib',
'lib/clang/*/include_sanitizer/*',
])
if args.gcc_toolchain is not None:
# Copy the libstdc++.so.6 we linked Clang against so it can run.
want.append('lib/libstdc++.so.6')
for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
# root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
rel_root = root[len(LLVM_RELEASE_DIR)+1:]
rel_files = [os.path.join(rel_root, f) for f in files]
wanted_files = list(set(itertools.chain.from_iterable(
fnmatch.filter(rel_files, p) for p in want)))
if wanted_files:
# Guaranteed to not yet exist at this point:
os.makedirs(os.path.join(pdir, rel_root))
for f in wanted_files:
src = os.path.join(LLVM_RELEASE_DIR, f)
dest = os.path.join(pdir, f)
shutil.copy(src, dest)
# Strip libraries.
if sys.platform == 'darwin' and f.endswith('.dylib'):
# Fix LC_ID_DYLIB for the ASan dynamic libraries to be relative to
# @executable_path.
# TODO(glider): this is transitional. We'll need to fix the dylib
# name either in our build system, or in Clang. See also
# http://crbug.com/344836.
subprocess.call(['install_name_tool', '-id',
'@executable_path/' + os.path.basename(dest), dest])
subprocess.call(['strip', '-x', dest])
elif (sys.platform.startswith('linux') and
os.path.splitext(f)[1] in ['.so', '.a']):
subprocess.call(['strip', '-g', dest])
# Set up symlinks.
if sys.platform != 'win32':
os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))
if sys.platform == 'darwin':
os.symlink('libc++.1.dylib', os.path.join(pdir, 'bin', 'libc++.dylib'))
# Also copy libc++ headers.
shutil.copytree(os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'include', 'c++'),
os.path.join(pdir, 'include', 'c++'))
# Copy buildlog over.
shutil.copy('buildlog.txt', pdir)
# Create archive.
tar_entries = ['bin', 'lib', 'buildlog.txt']
if sys.platform == 'darwin':
tar_entries += ['include']
with tarfile.open(pdir + '.tgz', 'w:gz') as tar:
for entry in tar_entries:
tar.add(os.path.join(pdir, entry), arcname=entry, filter=PrintTarProgress)
if sys.platform == 'darwin':
platform = 'Mac'
elif sys.platform == 'win32':
platform = 'Win'
else:
platform = 'Linux_x64'
print 'To upload, run:'
print ('gsutil cp -a public-read %s.tgz '
'gs://chromium-browser-clang/%s/%s.tgz') % (pdir, platform, pdir)
# Zip up gold plugin on Linux.
if sys.platform.startswith('linux'):
golddir = 'llvmgold-' + stamp
shutil.rmtree(golddir, ignore_errors=True)
os.makedirs(os.path.join(golddir, 'lib'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'lib', 'LLVMgold.so'),
os.path.join(golddir, 'lib'))
with tarfile.open(golddir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(golddir, 'lib'), arcname='lib',
filter=PrintTarProgress)
print ('gsutil cp -a public-read %s.tgz '
'gs://chromium-browser-clang/%s/%s.tgz') % (golddir, platform,
golddir)
# FIXME: Warn if the file already exists on the server.
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
MrLoick/python-for-android
|
python3-alpha/python3-src/Tools/scripts/get-remote-certificate.py
|
112
|
2720
|
#!/usr/bin/env python3
#
# fetch the certificate that the server(s) are providing in PEM form
#
# args are HOST:PORT [, HOST:PORT...]
#
# By Bill Janssen.
import re
import os
import sys
import tempfile
def fetch_server_certificate (host, port):
def subproc(cmd):
from subprocess import Popen, PIPE, STDOUT
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
status = proc.wait()
output = proc.stdout.read()
return status, output
def strip_to_x509_cert(certfile_contents, outfile=None):
m = re.search(br"^([-]+BEGIN CERTIFICATE[-]+[\r]*\n"
br".*[\r]*^[-]+END CERTIFICATE[-]+)$",
certfile_contents, re.MULTILINE | re.DOTALL)
if not m:
return None
else:
tn = tempfile.mktemp()
fp = open(tn, "wb")
fp.write(m.group(1) + b"\n")
fp.close()
try:
tn2 = (outfile or tempfile.mktemp())
status, output = subproc(r'openssl x509 -in "%s" -out "%s"' %
(tn, tn2))
if status != 0:
raise RuntimeError('OpenSSL x509 failed with status %s and '
'output: %r' % (status, output))
fp = open(tn2, 'rb')
data = fp.read()
fp.close()
os.unlink(tn2)
return data
finally:
os.unlink(tn)
if sys.platform.startswith("win"):
tfile = tempfile.mktemp()
fp = open(tfile, "w")
fp.write("quit\n")
fp.close()
try:
status, output = subproc(
'openssl s_client -connect "%s:%s" -showcerts < "%s"' %
(host, port, tfile))
finally:
os.unlink(tfile)
else:
status, output = subproc(
'openssl s_client -connect "%s:%s" -showcerts < /dev/null' %
(host, port))
if status != 0:
raise RuntimeError('OpenSSL connect failed with status %s and '
'output: %r' % (status, output))
certtext = strip_to_x509_cert(output)
if not certtext:
raise ValueError("Invalid response received from server at %s:%s" %
(host, port))
return certtext
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write(
"Usage: %s HOSTNAME:PORTNUMBER [, HOSTNAME:PORTNUMBER...]\n" %
sys.argv[0])
sys.exit(1)
for arg in sys.argv[1:]:
host, port = arg.split(":")
sys.stdout.buffer.write(fetch_server_certificate(host, int(port)))
sys.exit(0)
|
apache-2.0
|
jylaxp/django
|
django/db/models/fields/files.py
|
35
|
19051
|
import datetime
import os
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, we can maintain backwards compatibility.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
# `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
# If upload_to is a callable, make sure that the path it returns is
# passed through get_valid_name() of the underlying storage.
if callable(self.upload_to):
directory_name, filename = os.path.split(self.upload_to(instance, filename))
filename = self.storage.get_valid_name(filename)
return os.path.normpath(os.path.join(directory_name, filename))
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
|
bsd-3-clause
|
lumy/mingus
|
unittest/test_instrument.py
|
12
|
1957
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path += ['../']
from mingus.containers.instrument import Instrument, Piano, Guitar
from mingus.containers.note_container import NoteContainer
import unittest
class test_Instrument(unittest.TestCase):
def setUp(self):
self.i = Instrument()
self.p = Piano()
self.g = Guitar()
self.notes = NoteContainer(['A', 'B', 'C', 'D', 'E'])
self.noteslow = NoteContainer(['C-0', 'D-0', 'E-0'])
self.noteshigh = NoteContainer(['A-12', 'B-12', 'C-12', 'D-12', 'E-12'])
def test_note_in_range(self):
for x in self.notes:
self.assert_(self.i.note_in_range(x))
self.assert_(self.p.note_in_range(x))
self.assert_(self.g.note_in_range(x))
for x in self.noteslow + self.noteshigh:
self.assertEqual(False, self.p.note_in_range(x),
'%s should not be able to be played by a Piano'
% x)
self.assertEqual(False, self.g.note_in_range(x),
'%s should not be able to be played by a Guitar'
% x)
def test_can_play_notes(self):
self.assert_(self.i.can_play_notes(self.notes))
self.assert_(self.p.can_play_notes(self.notes))
self.assert_(self.g.can_play_notes(self.notes))
self.assertEqual(False, self.p.can_play_notes(self.noteslow))
self.assertEqual(False, self.g.can_play_notes(self.noteslow))
self.assertEqual(False, self.p.can_play_notes(self.noteshigh))
self.assertEqual(False, self.g.can_play_notes(self.noteshigh))
self.assertEqual(False, self.g.can_play_notes(NoteContainer([
'A',
'B',
'C',
'D',
'E',
'F',
'G',
])))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_Instrument)
|
gpl-3.0
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/unidecode/x01e.py
|
246
|
3853
|
data = (
'A', # 0x00
'a', # 0x01
'B', # 0x02
'b', # 0x03
'B', # 0x04
'b', # 0x05
'B', # 0x06
'b', # 0x07
'C', # 0x08
'c', # 0x09
'D', # 0x0a
'd', # 0x0b
'D', # 0x0c
'd', # 0x0d
'D', # 0x0e
'd', # 0x0f
'D', # 0x10
'd', # 0x11
'D', # 0x12
'd', # 0x13
'E', # 0x14
'e', # 0x15
'E', # 0x16
'e', # 0x17
'E', # 0x18
'e', # 0x19
'E', # 0x1a
'e', # 0x1b
'E', # 0x1c
'e', # 0x1d
'F', # 0x1e
'f', # 0x1f
'G', # 0x20
'g', # 0x21
'H', # 0x22
'h', # 0x23
'H', # 0x24
'h', # 0x25
'H', # 0x26
'h', # 0x27
'H', # 0x28
'h', # 0x29
'H', # 0x2a
'h', # 0x2b
'I', # 0x2c
'i', # 0x2d
'I', # 0x2e
'i', # 0x2f
'K', # 0x30
'k', # 0x31
'K', # 0x32
'k', # 0x33
'K', # 0x34
'k', # 0x35
'L', # 0x36
'l', # 0x37
'L', # 0x38
'l', # 0x39
'L', # 0x3a
'l', # 0x3b
'L', # 0x3c
'l', # 0x3d
'M', # 0x3e
'm', # 0x3f
'M', # 0x40
'm', # 0x41
'M', # 0x42
'm', # 0x43
'N', # 0x44
'n', # 0x45
'N', # 0x46
'n', # 0x47
'N', # 0x48
'n', # 0x49
'N', # 0x4a
'n', # 0x4b
'O', # 0x4c
'o', # 0x4d
'O', # 0x4e
'o', # 0x4f
'O', # 0x50
'o', # 0x51
'O', # 0x52
'o', # 0x53
'P', # 0x54
'p', # 0x55
'P', # 0x56
'p', # 0x57
'R', # 0x58
'r', # 0x59
'R', # 0x5a
'r', # 0x5b
'R', # 0x5c
'r', # 0x5d
'R', # 0x5e
'r', # 0x5f
'S', # 0x60
's', # 0x61
'S', # 0x62
's', # 0x63
'S', # 0x64
's', # 0x65
'S', # 0x66
's', # 0x67
'S', # 0x68
's', # 0x69
'T', # 0x6a
't', # 0x6b
'T', # 0x6c
't', # 0x6d
'T', # 0x6e
't', # 0x6f
'T', # 0x70
't', # 0x71
'U', # 0x72
'u', # 0x73
'U', # 0x74
'u', # 0x75
'U', # 0x76
'u', # 0x77
'U', # 0x78
'u', # 0x79
'U', # 0x7a
'u', # 0x7b
'V', # 0x7c
'v', # 0x7d
'V', # 0x7e
'v', # 0x7f
'W', # 0x80
'w', # 0x81
'W', # 0x82
'w', # 0x83
'W', # 0x84
'w', # 0x85
'W', # 0x86
'w', # 0x87
'W', # 0x88
'w', # 0x89
'X', # 0x8a
'x', # 0x8b
'X', # 0x8c
'x', # 0x8d
'Y', # 0x8e
'y', # 0x8f
'Z', # 0x90
'z', # 0x91
'Z', # 0x92
'z', # 0x93
'Z', # 0x94
'z', # 0x95
'h', # 0x96
't', # 0x97
'w', # 0x98
'y', # 0x99
'a', # 0x9a
'S', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'Ss', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'a', # 0xa1
'A', # 0xa2
'a', # 0xa3
'A', # 0xa4
'a', # 0xa5
'A', # 0xa6
'a', # 0xa7
'A', # 0xa8
'a', # 0xa9
'A', # 0xaa
'a', # 0xab
'A', # 0xac
'a', # 0xad
'A', # 0xae
'a', # 0xaf
'A', # 0xb0
'a', # 0xb1
'A', # 0xb2
'a', # 0xb3
'A', # 0xb4
'a', # 0xb5
'A', # 0xb6
'a', # 0xb7
'E', # 0xb8
'e', # 0xb9
'E', # 0xba
'e', # 0xbb
'E', # 0xbc
'e', # 0xbd
'E', # 0xbe
'e', # 0xbf
'E', # 0xc0
'e', # 0xc1
'E', # 0xc2
'e', # 0xc3
'E', # 0xc4
'e', # 0xc5
'E', # 0xc6
'e', # 0xc7
'I', # 0xc8
'i', # 0xc9
'I', # 0xca
'i', # 0xcb
'O', # 0xcc
'o', # 0xcd
'O', # 0xce
'o', # 0xcf
'O', # 0xd0
'o', # 0xd1
'O', # 0xd2
'o', # 0xd3
'O', # 0xd4
'o', # 0xd5
'O', # 0xd6
'o', # 0xd7
'O', # 0xd8
'o', # 0xd9
'O', # 0xda
'o', # 0xdb
'O', # 0xdc
'o', # 0xdd
'O', # 0xde
'o', # 0xdf
'O', # 0xe0
'o', # 0xe1
'O', # 0xe2
'o', # 0xe3
'U', # 0xe4
'u', # 0xe5
'U', # 0xe6
'u', # 0xe7
'U', # 0xe8
'u', # 0xe9
'U', # 0xea
'u', # 0xeb
'U', # 0xec
'u', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'Y', # 0xf2
'y', # 0xf3
'Y', # 0xf4
'y', # 0xf5
'Y', # 0xf6
'y', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
bsd-3-clause
|
LarryHillyer/PoolHost
|
PoolHost/env/Lib/site-packages/django/template/context.py
|
57
|
8669
|
import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
dicts = []
for d in args:
if isinstance(d, BaseContext):
dicts += d.dicts[1:]
else:
dicts.append(d)
return ContextDict(self, *dicts, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
warnings.warn(
"%s.has_key() is deprecated in favor of the 'in' operator." % self.__class__.__name__,
RemovedInDjango20Warning
)
return key in self
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
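# Illustrative behaviour of the dict stack above (values are examples only):
#   c = Context({'x': 1})
#   with c.push({'x': 2}):   # push() returns a ContextDict context manager
#       c['x']               # -> 2; the most recently pushed dict wins
#   c['x']                   # -> 1 again; ContextDict.__exit__ called pop()
#   c['True']                # -> True, from the builtins dict at the bottom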
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):
self.autoescape = autoescape
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
if isinstance(other_dict, BaseContext):
other_dict = other_dict.dicts[1:].pop()
return ContextDict(self, other_dict)
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RenderContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def __contains__(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None, use_l10n=None, use_tz=None, autoescape=True):
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz, autoescape=autoescape)
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None, **kwargs):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context, **kwargs)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request, **kwargs)
if original_context:
context.push(original_context)
return context
|
gpl-3.0
|
gsb-eng/tahoe-lafs
|
misc/coding_tools/check-interfaces.py
|
9
|
9011
|
# To check a particular Tahoe source distribution, this should be invoked from
# the root directory of that distribution as
#
# bin/tahoe @misc/coding_tools/check-interfaces.py
import os, sys, re, platform
import zope.interface as zi
# We use the forked version of verifyClass below.
#from zope.interface.verify import verifyClass
from zope.interface.advice import addClassAdvisor
interesting_modules = re.compile(r'(allmydata)|(foolscap)\..*')
excluded_classnames = re.compile(r'(_)|(Mock)|(Fake)|(Dummy).*')
excluded_file_basenames = re.compile(r'(check)|(bench)_.*')
_other_modules_with_violations = set()
_err = sys.stderr
_report_argname_mismatch = False # very noisy and usually not important
# deep magic
def strictly_implements(*interfaces):
frame = sys._getframe(1)
f_locals = frame.f_locals
# Try to make sure we were called from a class def. Assumes Python > 2.2.
if f_locals is frame.f_globals or '__module__' not in f_locals:
raise TypeError("implements can be used only from a class definition.")
if '__implements_advice_data__' in f_locals:
raise TypeError("implements can be used only once in a class definition.")
def _implements_advice(cls):
interfaces, classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
classImplements(cls, *interfaces)
if interesting_modules.match(cls.__module__):
if not excluded_classnames.match(cls.__name__):
for interface in interfaces:
try:
verifyClass(interface, cls)
except Exception, e:
print >>_err, ("%s.%s does not correctly implement %s.%s:\n%s"
% (cls.__module__, cls.__name__,
interface.__module__, interface.__name__, e))
else:
_other_modules_with_violations.add(cls.__module__)
return cls
f_locals['__implements_advice_data__'] = interfaces, zi.classImplements
addClassAdvisor(_implements_advice, depth=2)
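# Illustrative usage (class and interface names are hypothetical): once check()
# installs strictly_implements as zi.implements, an old-style declaration like
#   class Server:
#       zi.implements(IServer)
# registers a class advisor, so when the class body finishes the forked
# verifyClass() below runs against IServer and any violations are reported on
# stderr (for modules matching interesting_modules).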
def check():
# patchee-monkey
zi.implements = strictly_implements
if len(sys.argv) >= 2:
if sys.argv[1] == '--help' or len(sys.argv) > 2:
print >>_err, "Usage: check-miscaptures.py [SOURCEDIR]"
return
srcdir = sys.argv[1]
else:
# import modules under src/ by default
srcdir = 'src'
# attempt to avoid side-effects from importing command scripts
sys.argv = ['', '--help']
syslow = platform.system().lower()
is_windows = 'cygwin' in syslow or 'windows' in syslow
for (dirpath, dirnames, filenames) in os.walk(srcdir):
for fn in filenames:
(basename, ext) = os.path.splitext(fn)
if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')):
print >>_err, ("Warning: no .py source file for %r.\n"
% (os.path.join(dirpath, fn),))
if ext == '.py' and not excluded_file_basenames.match(basename):
relpath = os.path.join(dirpath[len(srcdir)+1:], basename)
module = relpath.replace(os.sep, '/').replace('/', '.')
try:
__import__(module)
except ImportError, e:
if not is_windows and (' _win' in str(e) or 'win32' in str(e)):
print >>_err, ("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
% (module, str(e)))
else:
import traceback
traceback.print_exc(file=_err)
print >>_err
others = list(_other_modules_with_violations)
others.sort()
print >>_err, "There were also interface violations in:\n", ", ".join(others), "\n"
# Forked from
# http://svn.zope.org/*checkout*/Zope3/trunk/src/zope/interface/verify.py?content-type=text%2Fplain&rev=27687
# but modified to report all interface violations rather than just the first.
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Verify interface implementations
$Id$
"""
from zope.interface.exceptions import DoesNotImplement
from zope.interface.exceptions import BrokenMethodImplementation
from types import FunctionType, MethodType
from zope.interface.interface import fromMethod, fromFunction, Method
# This will be monkey-patched when running under Zope 2, so leave this
# here:
MethodTypes = (MethodType, )
def _verify(iface, candidate, tentative=0, vtype=None):
"""Verify that 'candidate' might correctly implements 'iface'.
This involves:
o Making sure the candidate defines all the necessary methods
o Making sure the methods have the correct signature
o Making sure the candidate asserts that it implements the interface
Note that this isn't the same as verifying that the class does
implement the interface.
If optional tentative is true, suppress the "is implemented by" test.
"""
if vtype == 'c':
tester = iface.implementedBy
else:
tester = iface.providedBy
violations = []
def format(e):
return " " + str(e).strip() + "\n"
if not tentative and not tester(candidate):
violations.append(format(DoesNotImplement(iface)))
# Here the `desc` is either an `Attribute` or `Method` instance
for name, desc in iface.namesAndDescriptions(1):
if not hasattr(candidate, name):
if (not isinstance(desc, Method)) and vtype == 'c':
# We can't verify non-methods on classes, since the
# class may provide attrs in its __init__.
continue
if isinstance(desc, Method):
violations.append(" The %r method was not provided.\n" % (name,))
else:
violations.append(" The %r attribute was not provided.\n" % (name,))
continue
attr = getattr(candidate, name)
if not isinstance(desc, Method):
# If it's not a method, there's nothing else we can test
continue
if isinstance(attr, FunctionType):
# should never get here, since classes should not provide functions
meth = fromFunction(attr, iface, name=name)
elif (isinstance(attr, MethodTypes)
and type(attr.im_func) is FunctionType):
meth = fromMethod(attr, iface, name)
else:
if not callable(attr):
violations.append(format(BrokenMethodImplementation(name, "Not a method")))
# sigh, it's callable, but we don't know how to introspect it, so
# we have to give it a pass.
continue
# Make sure that the required and implemented method signatures are
# the same.
desc = desc.getSignatureInfo()
meth = meth.getSignatureInfo()
mess = _incompat(desc, meth)
if mess:
violations.append(format(BrokenMethodImplementation(name, mess)))
if violations:
raise Exception("".join(violations))
return True
def verifyClass(iface, candidate, tentative=0):
return _verify(iface, candidate, tentative, vtype='c')
def verifyObject(iface, candidate, tentative=0):
return _verify(iface, candidate, tentative, vtype='o')
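# Illustrative usage (added; the names are hypothetical): unlike the upstream
# zope.interface version, this fork collects every violation before raising,
# so a single call reports all problems at once:
#
#     try:
#         verifyClass(IStorageServer, StorageServer)
#     except Exception, e:
#         print str(e)   # multi-line listing of each missing or incompatible method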
def _incompat(required, implemented):
if len(implemented['required']) > len(required['required']):
return 'implementation requires too many arguments'
if ((len(implemented['positional']) < len(required['positional']))
and not implemented['varargs']):
return "implementation doesn't allow enough arguments"
if required['kwargs'] and not implemented['kwargs']:
return "implementation doesn't support keyword arguments"
if required['varargs'] and not implemented['varargs']:
return "implementation doesn't support variable arguments"
if (_report_argname_mismatch and required['positional'] !=
implemented['positional'][:len(required['positional'])]
and implemented['kwargs'] is None):
return 'implementation has different argument names'
if __name__ == "__main__":
check()
# Avoid spurious warnings about ignored exceptions during shutdown by doing a hard exit.
os._exit(0)
|
gpl-2.0
|
shipci/sympy
|
sympy/polys/tests/test_injections.py
|
126
|
1795
|
"""Tests for functions that inject symbols into the global namespace. """
from sympy.polys.rings import vring
from sympy.polys.fields import vfield
from sympy.polys.domains import QQ
from sympy.utilities.pytest import raises
# make r1 with call-depth = 1
def _make_r1():
return vring("r1", QQ)
# make r2 with call-depth = 2
def __make_r2():
return vring("r2", QQ)
def _make_r2():
return __make_r2()
def test_vring():
R = vring("r", QQ)
assert r == R.gens[0]
R = vring("rb rbb rcc rzz _rx", QQ)
assert rb == R.gens[0]
assert rbb == R.gens[1]
assert rcc == R.gens[2]
assert rzz == R.gens[3]
assert _rx == R.gens[4]
R = vring(['rd', 're', 'rfg'], QQ)
assert rd == R.gens[0]
assert re == R.gens[1]
assert rfg == R.gens[2]
# see if vring() really injects into global namespace
raises(NameError, lambda: r1)
R = _make_r1()
assert r1 == R.gens[0]
raises(NameError, lambda: r2)
R = _make_r2()
assert r2 == R.gens[0]
# make f1 with call-depth = 1
def _make_f1():
return vfield("f1", QQ)
# make f2 with call-depth = 2
def __make_f2():
return vfield("f2", QQ)
def _make_f2():
return __make_f2()
def test_vfield():
F = vfield("f", QQ)
assert f == F.gens[0]
F = vfield("fb fbb fcc fzz _fx", QQ)
assert fb == F.gens[0]
assert fbb == F.gens[1]
assert fcc == F.gens[2]
assert fzz == F.gens[3]
assert _fx == F.gens[4]
F = vfield(['fd', 'fe', 'ffg'], QQ)
assert fd == F.gens[0]
assert fe == F.gens[1]
assert ffg == F.gens[2]
# see if vfield() really injects into global namespace
raises(NameError, lambda: f1)
F = _make_f1()
assert f1 == F.gens[0]
raises(NameError, lambda: f2)
F = _make_f2()
assert f2 == F.gens[0]
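# Illustrative sketch (added; not part of the test suite): the behaviour under
# test is that vring()/vfield() both return the ring or field *and* inject the
# generator symbols into the caller's global namespace, e.g.
#
#     R = vring("x y", QQ)
#     p = x**2 + y        # `x` and `y` were injected by vring()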
|
bsd-3-clause
|
redhat-openstack/django
|
tests/admin_filters/models.py
|
115
|
1359
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
year = models.PositiveIntegerField(null=True, blank=True)
author = models.ForeignKey(User, verbose_name="Verbose Author", related_name='books_authored', blank=True, null=True)
contributors = models.ManyToManyField(User, verbose_name="Verbose Contributors", related_name='books_contributed', blank=True, null=True)
is_best_seller = models.NullBooleanField(default=0)
date_registered = models.DateField(null=True)
no = models.IntegerField(verbose_name='number', blank=True, null=True) # This field is intentionally 2 characters long. See #16080.
def __str__(self):
return self.title
@python_2_unicode_compatible
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
description = models.CharField(max_length=50, blank=True, null=True)
def __str__(self):
return self.description
@python_2_unicode_compatible
class Employee(models.Model):
department = models.ForeignKey(Department, to_field="code")
name = models.CharField(max_length=100)
def __str__(self):
return self.name
|
bsd-3-clause
|
chouseknecht/ansible
|
test/units/modules/network/f5/test_bigip_device_group_member.py
|
22
|
2882
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_group_member import Parameters
from library.modules.bigip_device_group_member import ModuleManager
from library.modules.bigip_device_group_member import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_group_member import Parameters
from ansible.modules.network.f5.bigip_device_group_member import ModuleManager
from ansible.modules.network.f5.bigip_device_group_member import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
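# Illustrative usage (added; the fixture name is hypothetical): load_fixture()
# reads a file from ./fixtures, parses it as JSON when possible, and memoizes
# the result in fixture_data so repeated calls within a run are cheap:
#
#     current = load_fixture('load_device_group_member.json')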
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='bigip1',
device_group='dg1'
)
p = Parameters(params=args)
assert p.name == 'bigip1'
assert p.device_group == 'dg1'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(
dict(
name="bigip1",
device_group="dg1",
state="present",
provider=dict(
server='localhost',
password='password',
user='admin'
)
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
|
gpl-3.0
|
UOMx/edx-platform
|
lms/djangoapps/shoppingcart/tests/test_views.py
|
13
|
100706
|
"""
Tests for Shopping Cart views
"""
from collections import OrderedDict
import pytz
from urlparse import urlparse
from decimal import Decimal
import json
from django.http import HttpRequest
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import Group, User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core import mail
from django.core.cache import cache
from pytz import UTC
from freezegun import freeze_time
from datetime import datetime, timedelta
from mock import patch, Mock
import ddt
from common.test.utils import XssTestMixin
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.roles import CourseSalesAdminRole
from util.date_utils import get_default_time_display
from util.testing import UrlResetMixin
from shoppingcart.views import _can_download_report, _get_date_from_str
from shoppingcart.models import (
Order, CertificateItem, PaidCourseRegistration, CourseRegCodeItem,
Coupon, CourseRegistrationCode, RegistrationCodeRedemption,
DonationConfiguration,
CouponRedemption)
from student.tests.factories import UserFactory, AdminFactory, CourseModeFactory
from courseware.tests.factories import InstructorFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response
from embargo.test_utils import restrict_course
from shoppingcart.processors import render_purchase_form_html
from shoppingcart.admin import SoftDeleteCouponAdmin
from shoppingcart.views import initialize_report
from shoppingcart.tests.payment_fake import PaymentFakeView
from shoppingcart.processors.CyberSource2 import sign
def mock_render_purchase_form_html(*args, **kwargs):
return render_purchase_form_html(*args, **kwargs)
form_mock = Mock(side_effect=mock_render_purchase_form_html)
def mock_render_to_response(*args, **kwargs):
return render_to_response(*args, **kwargs)
render_mock = Mock(side_effect=mock_render_to_response)
postpay_mock = Mock()
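# Note (added): form_mock and render_mock wrap the real implementations via
# Mock(side_effect=...), so the patched views behave normally while the tests
# can still inspect call_args; postpay_mock has its return_value configured
# per test.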
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
@ddt.ddt
class ShoppingCartViewsTests(SharedModuleStoreTestCase, XssTestMixin):
@classmethod
def setUpClass(cls):
super(ShoppingCartViewsTests, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
cls.course_key = cls.course.id
verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course')
cls.verified_course_key = verified_course.id
xss_course = CourseFactory.create(org='xssorg', number='test', display_name='<script>alert("XSS")</script>')
cls.xss_course_key = xss_course.id
cls.testing_course = CourseFactory.create(org='edX', number='888', display_name='Testing Super Course')
def setUp(self):
super(ShoppingCartViewsTests, self).setUp()
patcher = patch('student.models.tracker')
self.mock_tracker = patcher.start()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.instructor = AdminFactory.create()
self.cost = 40
self.coupon_code = 'abcde'
self.reg_code = 'qwerty'
self.percentage_discount = 10
self.course_mode = CourseMode(
course_id=self.course_key,
mode_slug=CourseMode.HONOR,
mode_display_name="honor cert",
min_price=self.cost
)
self.course_mode.save()
# Saving another testing course mode
self.testing_cost = 20
self.testing_course_mode = CourseMode(
course_id=self.testing_course.id,
mode_slug=CourseMode.HONOR,
mode_display_name="testing honor cert",
min_price=self.testing_cost
)
self.testing_course_mode.save()
# And for the XSS course
CourseMode(
course_id=self.xss_course_key,
mode_slug=CourseMode.HONOR,
mode_display_name="honor cert",
min_price=self.cost
).save()
# And the verified course
self.verified_course_mode = CourseMode(
course_id=self.verified_course_key,
mode_slug=CourseMode.HONOR,
mode_display_name="honor cert",
min_price=self.cost
)
self.verified_course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
self.addCleanup(patcher.stop)
self.now = datetime.now(pytz.UTC)
self.yesterday = self.now - timedelta(days=1)
self.tomorrow = self.now + timedelta(days=1)
def get_discount(self, cost):
"""
This method simply returns the discounted amount.
"""
val = Decimal("{0:.2f}".format(Decimal(self.percentage_discount / 100.00) * cost))
return cost - val
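# Worked example (added for clarity): with percentage_discount = 10 and
# cost = 40, val is Decimal('4.00') and get_discount() returns 36.00,
# the figure the coupon tests below assert against.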
def add_coupon(self, course_key, is_active, code):
"""
Add a dummy coupon record to the database.
"""
coupon = Coupon(code=code, description='testing code', course_id=course_key,
percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active)
coupon.save()
def add_reg_code(self, course_key, mode_slug=None, is_valid=True):
"""
Add a dummy registration code record to the database.
"""
if mode_slug is None:
mode_slug = self.course_mode.mode_slug
course_reg_code = CourseRegistrationCode(
code=self.reg_code, course_id=course_key,
created_by=self.user, mode_slug=mode_slug,
is_valid=is_valid
)
course_reg_code.save()
def _add_course_mode(self, min_price=50, mode_slug='honor', expiration_date=None):
"""
Adds a course mode to the test course.
"""
mode = CourseModeFactory.create()
mode.course_id = self.course.id
mode.min_price = min_price
mode.mode_slug = mode_slug
mode.expiration_date = expiration_date
mode.save()
return mode
def add_course_to_user_cart(self, course_key):
"""
Add a course to the user's cart.
"""
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, course_key, mode_slug=self.course_mode.mode_slug)
return reg_item
def login_user(self):
self.client.login(username=self.user.username, password="password")
def test_add_course_to_cart_anon(self):
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 403)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_billing_details(self):
billing_url = reverse('billing_details')
self.login_user()
# page not found error because order_type is not business
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 404)
# change the order_type to business
self.cart.order_type = 'business'
self.cart.save()
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/billing_details.html')
# check for the default currency in the context
self.assertEqual(context['currency'], 'usd')
self.assertEqual(context['currency_symbol'], '$')
data = {'company_name': 'Test Company', 'company_contact_name': 'JohnDoe',
'company_contact_email': 'john@est.com', 'recipient_name': 'Mocker',
'recipient_email': 'mock@germ.com', 'company_address_line_1': 'DC Street # 1',
'company_address_line_2': '',
'company_city': 'DC', 'company_state': 'NY', 'company_zip': '22003', 'company_country': 'US',
'customer_reference_number': 'PO#23'}
resp = self.client.post(billing_url, data)
self.assertEqual(resp.status_code, 200)
@patch('shoppingcart.views.render_to_response', render_mock)
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_billing_details_with_override_currency_settings(self):
billing_url = reverse('billing_details')
self.login_user()
# change the order_type to business
self.cart.order_type = 'business'
self.cart.save()
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 200)
((template, context), __) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/billing_details.html')
# check for the override currency settings in the context
self.assertEqual(context['currency'], 'PKR')
self.assertEqual(context['currency_symbol'], 'Rs')
def test_same_coupon_code_applied_on_multiple_items_in_the_cart(self):
"""
Test that the same coupon code can be applied to multiple
items in the cart.
"""
self.login_user()
# add first course to user cart
resp = self.client.post(
reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])
)
self.assertEqual(resp.status_code, 200)
# add and apply the coupon code to course in the cart
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# now add the same coupon code to the second course (testing_course)
self.add_coupon(self.testing_course.id, True, self.coupon_code)
# now add the second course to the cart; the coupon code should be
# applied when the second course is added to the cart
resp = self.client.post(
reverse('shoppingcart.views.add_course_to_cart', args=[self.testing_course.id.to_deprecated_string()])
)
self.assertEqual(resp.status_code, 200)
# now check the user cart and see that the discount has been applied to both courses
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
# the first course price is $40 and the second course price is $20;
# after a 10% discount on both courses the total price will be 18 + 36 = 54
self.assertIn('54.00', resp.content)
def test_add_course_to_cart_already_in_cart(self):
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 400)
self.assertIn('The course {0} is already in your cart.'.format(self.course_key.to_deprecated_string()), resp.content)
def test_course_discount_invalid_coupon(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
non_existing_code = "non_existing_code"
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content)
def test_valid_qty_greater_then_one_and_purchase_type_should_business(self):
qty = 2
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * qty)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'business')
def test_in_valid_qty_case(self):
# invalid quantity, Quantity must be between 1 and 1000.
qty = 0
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be between 1 and 1000.", resp.content)
# invalid quantity, Quantity must be an integer.
qty = 'abcde'
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be an integer.", resp.content)
# invalid quantity, Quantity is not present in request
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be between 1 and 1000.", resp.content)
def test_valid_qty_but_item_not_found(self):
qty = 2
item_id = '-1'
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item_id, 'qty': qty})
self.assertEqual(resp.status_code, 404)
self.assertEqual('Order item does not exist.', resp.content)
# now testing the case if item id not found in request,
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertEqual('Order item not found in request.', resp.content)
def test_purchase_type_should_be_personal_when_qty_is_one(self):
qty = 1
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * 1)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_purchase_type_on_removing_item_and_cart_has_item_with_qty_one(self):
qty = 5
self.add_course_to_user_cart(self.course_key)
item2 = self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
cart = Order.get_cart_for_user(self.user)
cart_items = cart.orderitem_set.all()
test_flag = False
for cartitem in cart_items:
if cartitem.qty == 5:
test_flag = True
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id})
self.assertEqual(resp.status_code, 200)
self.assertTrue(test_flag)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_billing_details_btn_in_cart_when_qty_is_greater_than_one(self):
qty = 5
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertIn("Billing Details", resp.content)
def test_purchase_type_should_be_personal_when_remove_all_items_from_cart(self):
item1 = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item1.id, 'qty': 2})
self.assertEqual(resp.status_code, 200)
item2 = self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': 5})
self.assertEqual(resp.status_code, 200)
cart = Order.get_cart_for_user(self.user)
cart_items = cart.orderitem_set.all()
test_flag = False
for cartitem in cart_items:
test_flag = True
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id})
self.assertEqual(resp.status_code, 200)
self.assertTrue(test_flag)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_use_valid_coupon_code_and_qty_is_greater_than_one(self):
qty = 5
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * qty)
# use coupon code
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost * qty, 180)
def test_course_discount_invalid_reg_code(self):
self.add_reg_code(self.course_key)
self.add_course_to_user_cart(self.course_key)
non_existing_code = "non_existing_code"
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content)
def test_course_discount_inactive_coupon(self):
self.add_coupon(self.course_key, False, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content)
def test_course_does_not_exist_in_cart_against_valid_coupon(self):
course_key = self.course_key.to_deprecated_string() + 'testing'
self.add_coupon(course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content)
def test_inactive_registration_code_returns_error(self):
"""
Test that redeeming an inactive registration code
returns an error.
"""
course_key = self.course_key.to_deprecated_string()
self.add_reg_code(course_key, is_valid=False)
self.add_course_to_user_cart(self.course_key)
# now apply the inactive registration code
# it will raise an exception
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 400)
self.assertIn(
"This enrollment code ({enrollment_code}) is no longer valid.".format(
enrollment_code=self.reg_code), resp.content)
def test_course_does_not_exist_in_cart_against_valid_reg_code(self):
course_key = self.course_key.to_deprecated_string() + 'testing'
self.add_reg_code(course_key)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Code '{0}' is not valid for any course in the shopping cart.".format(self.reg_code), resp.content)
def test_cart_item_qty_greater_than_1_against_valid_reg_code(self):
course_key = self.course_key.to_deprecated_string()
self.add_reg_code(course_key)
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4})
self.assertEqual(resp.status_code, 200)
# now update the cart item quantity and then apply the registration code
# it will raise an exception
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Cart item quantity should not be greater than 1 when applying activation code", resp.content)
@ddt.data(True, False)
def test_reg_code_uses_associated_mode(self, expired_mode):
"""Tests the use of reg codes on verified courses, expired or active. """
course_key = self.course_key.to_deprecated_string()
expiration_date = self.yesterday if expired_mode else self.tomorrow
self._add_course_mode(mode_slug='verified', expiration_date=expiration_date)
self.add_reg_code(course_key, mode_slug='verified')
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost')
self.assertEqual(resp.status_code, 200)
self.assertIn(self.course.display_name, resp.content)
@ddt.data(True, False)
def test_reg_code_uses_unknown_mode(self, expired_mode):
"""Tests the use of reg codes on verified courses, expired or active. """
course_key = self.course_key.to_deprecated_string()
expiration_date = self.yesterday if expired_mode else self.tomorrow
self._add_course_mode(mode_slug='verified', expiration_date=expiration_date)
self.add_reg_code(course_key, mode_slug='bananas')
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost')
self.assertEqual(resp.status_code, 200)
self.assertIn(self.course.display_name, resp.content)
self.assertIn("error processing your redeem code", resp.content)
def test_course_discount_for_valid_active_coupon_code(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
# after getting 10 percent discount
self.assertEqual(self.cart.total_cost, self.get_discount(self.cost))
# now using the same coupon code against the same order.
# Only one coupon redemption should be allowed per order.
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 400)
self.assertIn("Only one coupon redemption is allowed against an order", resp.content)
def test_course_discount_against_two_distinct_coupon_codes(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
# now using another valid active coupon code.
# Only one coupon redemption should be allowed per order.
self.add_coupon(self.course_key, True, 'abxyz')
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'abxyz'})
self.assertEqual(resp.status_code, 400)
self.assertIn("Only one coupon redemption is allowed against an order", resp.content)
def test_same_coupons_code_on_multiple_courses(self):
# add two same coupon codes on two different courses
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_coupon(self.testing_course.id, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
item = self.cart.orderitem_set.all().select_subclasses()[1]
self.assertEquals(item.unit_cost, self.get_discount(self.testing_cost))
def test_soft_delete_coupon(self):
self.add_coupon(self.course_key, True, self.coupon_code)
coupon = Coupon(code='TestCode', description='testing', course_id=self.course_key,
percentage_discount=12, created_by=self.user, is_active=True)
coupon.save()
self.assertEquals(coupon.__unicode__(), '[Coupon] code: TestCode course: MITx/999/Robot_Super_Course')
admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
admin.is_staff = True
get_coupon = Coupon.objects.get(id=1)
request = HttpRequest()
request.user = admin
request.session = 'session'
messages = FallbackStorage(request)
request._messages = messages # pylint: disable=protected-access
coupon_admin = SoftDeleteCouponAdmin(Coupon, AdminSite())
test_query_set = coupon_admin.queryset(request)
test_actions = coupon_admin.get_actions(request)
self.assertIn('really_delete_selected', test_actions['really_delete_selected'])
self.assertEqual(get_coupon.is_active, True)
coupon_admin.really_delete_selected(request, test_query_set)
for coupon in test_query_set:
self.assertEqual(coupon.is_active, False)
coupon_admin.delete_model(request, get_coupon)
self.assertEqual(get_coupon.is_active, False)
coupon = Coupon(code='TestCode123', description='testing123', course_id=self.course_key,
percentage_discount=22, created_by=self.user, is_active=True)
coupon.save()
test_query_set = coupon_admin.queryset(request)
coupon_admin.really_delete_selected(request, test_query_set)
for coupon in test_query_set:
self.assertEqual(coupon.is_active, False)
def test_course_free_discount_for_valid_active_reg_code(self):
self.add_reg_code(self.course_key)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
# now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
# now test the scenario where the registration code has already been used, by reusing the same code;
# the item was removed from the cart when the registration code was redeemed the first time
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 400)
self.assertIn("This enrollment code ({enrollment_code}) is not valid.".format(
enrollment_code=self.reg_code
), resp.content)
def test_upgrade_from_valid_reg_code(self):
"""Use a valid registration code to upgrade from honor to verified mode. """
# Ensure the course has a verified mode
course_key = self.course_key.to_deprecated_string()
self._add_course_mode(mode_slug='verified')
self.add_reg_code(course_key, mode_slug='verified')
# Enroll as honor in the course with the current user.
CourseEnrollment.enroll(self.user, self.course_key, mode=CourseMode.HONOR)
self.login_user()
current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key)
self.assertEquals('honor', current_enrollment)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
# now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
# Once upgraded, should be "verified"
current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key)
self.assertEquals('verified', current_enrollment)
@patch('shoppingcart.views.log.debug')
def test_non_existing_coupon_redemption_on_removing_item(self, debug_log):
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
debug_log.assert_called_with(
'Code redemption does not exist for order item id=%s.',
str(reg_item.id)
)
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
@patch('shoppingcart.views.log.info')
def test_existing_coupon_redemption_on_removing_item(self, info_log):
self.add_coupon(self.course_key, True, self.coupon_code)
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
info_log.assert_called_with(
'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
self.coupon_code,
self.user,
str(reg_item.id)
)
@patch('shoppingcart.views.log.info')
def test_reset_redemption_for_coupon(self, info_log):
self.add_coupon(self.course_key, True, self.coupon_code)
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.reset_code_redemption', args=[]))
self.assertEqual(resp.status_code, 200)
info_log.assert_called_with(
'Coupon redemption entry removed for user %s for order %s',
self.user,
reg_item.id
)
@patch('shoppingcart.views.log.info')
def test_coupon_discount_for_multiple_courses_in_cart(self, info_log):
reg_item = self.add_course_to_user_cart(self.course_key)
self.add_coupon(self.course_key, True, self.coupon_code)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit_cost should be updated for that particular course for which coupon code is registered
items = self.cart.orderitem_set.all().select_subclasses()
for item in items:
if item.id == reg_item.id:
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
self.assertEquals(item.list_price, self.cost)
elif item.id == cert_item.id:
self.assertEquals(item.list_price, self.cost)
self.assertEquals(item.unit_cost, self.cost)
# Delete the discounted item, corresponding coupon redemption should
# be removed for that particular discounted item
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
info_log.assert_called_with(
'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
self.coupon_code,
self.user,
str(reg_item.id)
)
@patch('shoppingcart.views.log.info')
def test_delete_certificate_item(self, info_log):
self.add_course_to_user_cart(self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
# Delete the certificate item; the corresponding order item should be removed from the cart
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': cert_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
info_log.assert_called_with("order item %s removed for user %s", str(cert_item.id), self.user)
@patch('shoppingcart.views.log.info')
def test_remove_coupon_redemption_on_clear_cart(self, info_log):
reg_item = self.add_course_to_user_cart(self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
info_log.assert_called_with(
'Coupon redemption entry removed for user %s for order %s',
self.user,
reg_item.id
)
def test_add_course_to_cart_already_registered(self):
CourseEnrollment.enroll(self.user, self.course_key)
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 400)
self.assertIn('You are already registered in course {0}.'.format(self.course_key.to_deprecated_string()), resp.content)
def test_add_nonexistent_course_to_cart(self):
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=['non/existent/course']))
self.assertEqual(resp.status_code, 404)
self.assertIn("The course you requested does not exist.", resp.content)
def test_add_course_to_cart_success(self):
self.login_user()
reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 200)
self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key))
@patch('shoppingcart.views.render_purchase_form_html', form_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_cart(self):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
cert_item = CertificateItem.add_to_order(
self.cart,
self.verified_course_key,
self.cost,
self.course_mode.mode_slug
)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name
purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
self.assertIn(reg_item, purchase_form_arg_cart_items)
self.assertIn(cert_item, purchase_form_arg_cart_items)
self.assertEqual(len(purchase_form_arg_cart_items), 2)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
self.assertEqual(len(context['shoppingcart_items']), 2)
self.assertEqual(context['amount'], 80)
self.assertIn("80.00", context['form_html'])
# check for the default currency in the context
self.assertEqual(context['currency'], 'usd')
self.assertEqual(context['currency_symbol'], '$')
@patch('shoppingcart.views.render_purchase_form_html', form_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_show_cart_with_override_currency_settings(self):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name
purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
self.assertIn(reg_item, purchase_form_arg_cart_items)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
# check for the override currency settings in the context
self.assertEqual(context['currency'], 'PKR')
self.assertEqual(context['currency_symbol'], 'Rs')
def test_clear_cart(self):
self.login_user()
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
@patch('shoppingcart.views.log.exception')
def test_remove_item(self, exception_log):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
self.assertNotIn(reg_item, self.cart.orderitem_set.all().select_subclasses())
self.cart.purchase()
resp2 = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': cert_item.id})
self.assertEqual(resp2.status_code, 200)
exception_log.assert_called_with(
'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased', str(cert_item.id)
)
resp3 = self.client.post(
reverse('shoppingcart.views.remove_item', args=[]),
{'id': -1}
)
self.assertEqual(resp3.status_code, 200)
exception_log.assert_called_with(
'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
'-1'
)
@patch('shoppingcart.views.process_postpay_callback', postpay_mock)
def test_postpay_callback_success(self):
postpay_mock.return_value = {'success': True, 'order': self.cart}
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
self.assertEqual(resp.status_code, 302)
self.assertEqual(urlparse(resp.__getitem__('location')).path,
reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
@patch('shoppingcart.views.process_postpay_callback', postpay_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_postpay_callback_failure(self):
postpay_mock.return_value = {'success': False, 'order': self.cart, 'error_html': 'ERROR_TEST!!!'}
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn('ERROR_TEST!!!', resp.content)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/error.html')
self.assertEqual(context['order'], self.cart)
self.assertEqual(context['error_html'], 'ERROR_TEST!!!')
@ddt.data(0, 1)
def test_show_receipt_json(self, num_items):
# Create the correct number of items in the order
for __ in range(num_items):
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
# Should have gotten a successful response
self.assertEqual(resp.status_code, 200)
# Parse the response as JSON and check the contents
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('currency'), self.cart.currency)
self.assertEqual(json_resp.get('purchase_datetime'), get_default_time_display(self.cart.purchase_time))
self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
self.assertEqual(json_resp.get('status'), "purchased")
self.assertEqual(json_resp.get('billed_to'), {
'first_name': self.cart.bill_to_first,
'last_name': self.cart.bill_to_last,
'street1': self.cart.bill_to_street1,
'street2': self.cart.bill_to_street2,
'city': self.cart.bill_to_city,
'state': self.cart.bill_to_state,
'postal_code': self.cart.bill_to_postalcode,
'country': self.cart.bill_to_country
})
self.assertEqual(len(json_resp.get('items')), num_items)
for item in json_resp.get('items'):
self.assertEqual(item, {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': '{} for course Test Course'.format(self.verified_course_mode.mode_display_name),
'course_key': unicode(self.verified_course_key)
})
def test_show_receipt_xss(self):
CertificateItem.add_to_order(self.cart, self.xss_course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url)
self.assert_no_xss(resp, '<script>alert("XSS")</script>')
@patch('shoppingcart.views.render_to_response', render_mock)
def test_reg_code_xss(self):
self.add_reg_code(self.xss_course_key)
# One course in the user's shopping cart
self.add_course_to_user_cart(self.xss_course_key)
self.assertEquals(self.cart.orderitem_set.count(), 1)
post_response = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(post_response.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
redeem_response = self.client.get(redeem_url)
self.assert_no_xss(redeem_response, '<script>alert("XSS")</script>')
def test_show_receipt_json_multiple_items(self):
# Two different item types
PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
CertificateItem.add_to_order(
self.cart,
self.verified_course_key,
self.cost,
self.verified_course_mode.mode_slug
)
self.cart.purchase()
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
# Should have gotten a successful response
self.assertEqual(resp.status_code, 200)
# Parse the response as JSON and check the contents
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
items = json_resp.get('items')
self.assertEqual(len(items), 2)
self.assertEqual(items[0], {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': 'Registration for Course: Robot Super Course',
'course_key': unicode(self.course_key)
})
self.assertEqual(items[1], {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': '{} for course Test Course'.format(self.verified_course_mode.mode_display_name),
'course_key': unicode(self.verified_course_key)
})
def test_receipt_json_refunded(self):
mock_enrollment = Mock()
mock_enrollment.refundable.side_effect = lambda: True
mock_enrollment.course_id = self.verified_course_key
mock_enrollment.user = self.user
CourseMode.objects.create(
course_id=self.verified_course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost
)
cert = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
self.cart.purchase()
cert.refund_cert_callback(course_enrollment=mock_enrollment)
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
self.assertEqual(resp.status_code, 200)
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('status'), 'refunded')
def test_show_receipt_404s(self):
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase()
user2 = UserFactory.create()
cart2 = Order.get_cart_for_user(user2)
PaidCourseRegistration.add_to_order(cart2, self.course_key)
cart2.purchase()
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[cart2.id]))
self.assertEqual(resp.status_code, 404)
resp2 = self.client.get(reverse('shoppingcart.views.show_receipt', args=[1000]))
self.assertEqual(resp2.status_code, 404)
def test_total_amount_of_purchased_course(self):
self.add_course_to_user_cart(self.course_key)
self.assertEquals(self.cart.orderitem_set.count(), 1)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
# Total amount of a particular course that is purchased by different users
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
self.assertEqual(total_amount, 36)
self.client.login(username=self.instructor.username, password="test")
cart = Order.get_cart_for_user(self.instructor)
PaidCourseRegistration.add_to_order(cart, self.course_key, mode_slug=self.course_mode.mode_slug)
cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
self.assertEqual(total_amount, 76)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_valid_coupon_code(self):
self.add_course_to_user_cart(self.course_key)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn(str(self.get_discount(self.cost)), resp.content)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_reg_code_and_course_registration_scenario(self):
self.add_reg_code(self.course_key)
# One course in the user's shopping cart
self.add_course_to_user_cart(self.course_key)
self.assertEquals(self.cart.orderitem_set.count(), 1)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
# now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_reg_code_with_multiple_courses_and_checkout_scenario(self):
self.add_reg_code(self.course_key)
# Two courses in user shopping cart
self.login_user()
PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=self.course_mode.mode_slug)
item2 = PaidCourseRegistration.add_to_order(
self.cart,
self.testing_course.id,
mode_slug=self.course_mode.mode_slug
)
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
resp = self.client.get(redeem_url)
self.assertEquals(resp.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in resp.content)
# now activate the user by enrolling him/her in the course
resp = self.client.post(redeem_url)
self.assertEquals(resp.status_code, 200)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertIn('Payment', resp.content)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertEqual(context['order'].total_cost, self.testing_cost)
course_enrollment = CourseEnrollment.objects.filter(user=self.user)
self.assertEqual(course_enrollment.count(), 2)
# make sure the enrollment_ids were stored in the PaidCourseRegistration items
# refetch them first since they are updated
# item1 has been deleted from the cart.
# The user has been enrolled in the course from item1.
item2 = PaidCourseRegistration.objects.get(id=item2.id)
self.assertIsNotNone(item2.course_enrollment)
self.assertEqual(item2.course_enrollment.course_id, self.testing_course.id)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_valid_reg_code(self):
self.add_course_to_user_cart(self.course_key)
self.add_reg_code(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('0.00', resp.content)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success(self):
reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertFalse(context['any_refunds'])
# check for the default currency settings in the context
self.assertEqual(context['currency_symbol'], '$')
self.assertEqual(context['currency'], 'usd')
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_override_currency_settings(self):
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
# check for the override currency settings in the context
self.assertEqual(context['currency_symbol'], 'Rs')
self.assertEqual(context['currency'], 'PKR')
@patch('shoppingcart.views.render_to_response', render_mock)
def test_courseregcode_item_total_price(self):
self.cart.order_type = 'business'
self.cart.save()
CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2, mode_slug=self.course_mode.mode_slug)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.assertEquals(CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key), 80)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_order_type_business(self):
self.cart.order_type = 'business'
self.cart.save()
reg_item = CourseRegCodeItem.add_to_order(
self.cart,
self.course_key,
2,
mode_slug=self.course_mode.mode_slug
)
self.cart.add_billing_details(company_name='T1Omega', company_contact_name='C1',
company_contact_email='test@t1.com', recipient_email='test@t2.com')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # mail is sent to these addresses: recipient_email, company_contact_email, order.user.email
self.assertEquals(len(mail.outbox), 3)
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
# when order_type = 'business' the user is not enrolled in the
# course but presented with the enrollment links
self.assertFalse(CourseEnrollment.is_enrolled(self.cart.user, self.course_key))
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
# check for the enrollment codes content
self.assertIn('Please send each professional one of these unique registration codes to enroll into the course.', resp.content)
# fetch the newly generated registration codes
course_registration_codes = CourseRegistrationCode.objects.filter(order=self.cart)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
# now check for all the registration codes in the receipt
# and all the codes should be unused at this point
self.assertIn(course_registration_codes[0].code, context['reg_code_info_list'][0]['code'])
self.assertIn(course_registration_codes[1].code, context['reg_code_info_list'][1]['code'])
self.assertFalse(context['reg_code_info_list'][0]['is_redeemed'])
self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])
self.assertIn(self.cart.purchase_time.strftime("%B %d, %Y"), resp.content)
self.assertIn(self.cart.company_name, resp.content)
self.assertIn(self.cart.company_contact_name, resp.content)
self.assertIn(self.cart.company_contact_email, resp.content)
self.assertIn(self.cart.recipient_email, resp.content)
self.assertIn("Invoice #{order_id}".format(order_id=self.cart.id), resp.content)
self.assertIn('You have successfully purchased <b>{total_registration_codes} course registration codes'
.format(total_registration_codes=context['total_registration_codes']), resp.content)
        # now redeem one of the registration codes from the previous order
redeem_url = reverse('register_code_redemption', args=[context['reg_code_info_list'][0]['code']])
        # now activate the user by enrolling them in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.assertTrue('View Dashboard' in response.content)
        # now view the receipt page again to see whether any of the
        # registration codes have been redeemed yet
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
# now check for all the registration codes in the receipt
        # and one of the codes should be redeemed at this point
self.assertTrue(context['reg_code_info_list'][0]['is_redeemed'])
self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_upgrade(self):
reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
self.mock_tracker.emit.reset_mock() # pylint: disable=maybe-no-member
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
((template, context), _) = render_mock.call_args
# When we come from the upgrade flow, we get these context variables
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertFalse(context['any_refunds'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_refund(self):
reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
cert_item.status = "refunded"
cert_item.save()
self.assertEqual(self.cart.total_cost, 40)
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('40.00', resp.content)
((template, context), _tmp) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertTrue(context['any_refunds'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_custom_receipt_page(self):
cert_item = CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
receipt_url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(receipt_url)
self.assertEqual(resp.status_code, 200)
((template, _context), _tmp) = render_mock.call_args
self.assertEqual(template, cert_item.single_item_receipt_template)
def _assert_404(self, url, use_post=False):
"""
Helper method to assert that a given url will return a 404 status code
"""
if use_post:
response = self.client.post(url)
else:
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': False})
def test_disabled_paid_courses(self):
"""
Assert that the pages that require ENABLE_PAID_COURSE_REGISTRATION=True return a
HTTP 404 status code when we have this flag turned off
"""
self.login_user()
self._assert_404(reverse('shoppingcart.views.show_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.clear_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.remove_item', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.register_code_redemption', args=["testing"]))
self._assert_404(reverse('shoppingcart.views.use_code', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.update_user_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.reset_code_redemption', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.billing_details', args=[]))
def test_upgrade_postpay_callback_emits_ga_event(self):
# Enroll as honor in the course with the current user.
CourseEnrollment.enroll(self.user, self.course_key)
# add verified mode
CourseMode.objects.create(
course_id=self.verified_course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost
)
# Purchase a verified certificate
self.cart = Order.get_cart_for_user(self.user)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
self.cart.start_purchase()
self.login_user()
# setting the attempting upgrade session value.
session = self.client.session
session['attempting_upgrade'] = True
session.save()
ordered_params = OrderedDict([
('amount', self.cost),
('currency', 'usd'),
('transaction_type', 'sale'),
('orderNumber', str(self.cart.id)),
('access_key', '123456789'),
('merchantID', 'edx'),
('djch', '012345678912'),
('orderPage_version', 2),
('orderPage_serialNumber', '1234567890'),
('profile_id', "00000001"),
('reference_number', str(self.cart.id)),
('locale', 'en'),
('signed_date_time', '2014-08-18T13:59:31Z'),
])
resp_params = PaymentFakeView.response_post_params(sign(ordered_params))
self.assertTrue(self.client.session.get('attempting_upgrade'))
url = reverse('shoppingcart.views.postpay_callback')
self.client.post(url, resp_params, follow=True)
self.assertFalse(self.client.session.get('attempting_upgrade'))
self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member
'edx.course.enrollment.upgrade.succeeded',
{
'user_id': self.user.id,
'course_id': self.verified_course_key.to_deprecated_string(),
'mode': 'verified'
}
)
def test_shopping_cart_navigation_link_not_in_microsite(self):
"""
Tests shopping cart link is available in navigation header if request is not from a microsite.
"""
CourseEnrollment.enroll(self.user, self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.get(reverse('courseware', kwargs={'course_id': unicode(self.course.id)}))
self.assertEqual(resp.status_code, 200)
self.assertIn('<a class="shopping-cart"', resp.content)
def test_shopping_cart_navigation_link_not_in_microsite_and_not_on_courseware(self):
"""
Tests shopping cart link is available in navigation header if request is not from a microsite
and requested page is not courseware too.
"""
CourseEnrollment.enroll(self.user, self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.get(reverse('dashboard'))
self.assertEqual(resp.status_code, 200)
self.assertIn('<a class="shopping-cart"', resp.content)
def test_shopping_cart_navigation_link_in_microsite_not_on_courseware(self):
"""
Tests shopping cart link is available in navigation header if request is from a microsite but requested
page is not from courseware.
"""
CourseEnrollment.enroll(self.user, self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
with patch('microsite_configuration.microsite.is_request_in_microsite',
Mock(return_value=True)):
resp = self.client.get(reverse('dashboard'))
self.assertEqual(resp.status_code, 200)
self.assertIn('<a class="shopping-cart"', resp.content)
def test_shopping_cart_navigation_link_in_microsite_courseware_page(self):
"""
Tests shopping cart link is not available in navigation header if request is from a microsite
and requested page is from courseware.
"""
CourseEnrollment.enroll(self.user, self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
with patch('microsite_configuration.microsite.is_request_in_microsite',
Mock(return_value=True)):
resp = self.client.get(reverse('courseware', kwargs={'course_id': unicode(self.course.id)}))
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<a class="shopping-cart"', resp.content)
class ReceiptRedirectTest(SharedModuleStoreTestCase):
"""Test special-case redirect from the receipt page. """
COST = 40
PASSWORD = 'password'
@classmethod
def setUpClass(cls):
super(ReceiptRedirectTest, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_key = cls.course.id
def setUp(self):
super(ReceiptRedirectTest, self).setUp()
self.user = UserFactory.create()
self.user.set_password(self.PASSWORD)
self.user.save()
self.course_mode = CourseMode(
course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.COST
)
self.course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
self.client.login(
username=self.user.username,
password=self.PASSWORD
)
def test_postpay_callback_redirect_to_verify_student(self):
# Create other carts first
# This ensures that the order ID and order item IDs do not match
Order.get_cart_for_user(self.user).start_purchase()
Order.get_cart_for_user(self.user).start_purchase()
Order.get_cart_for_user(self.user).start_purchase()
# Purchase a verified certificate
self.cart = Order.get_cart_for_user(self.user)
CertificateItem.add_to_order(
self.cart,
self.course_key,
self.COST,
'verified'
)
self.cart.start_purchase()
# Simulate hitting the post-pay callback
with patch('shoppingcart.views.process_postpay_callback') as mock_process:
mock_process.return_value = {'success': True, 'order': self.cart}
url = reverse('shoppingcart.views.postpay_callback')
resp = self.client.post(url, follow=True)
# Expect to be redirected to the payment confirmation
# page in the verify_student app
redirect_url = reverse(
'verify_student_payment_confirmation',
kwargs={'course_id': unicode(self.course_key)}
)
redirect_url += '?payment-order-num={order_num}'.format(
order_num=self.cart.id
)
self.assertIn(redirect_url, resp.redirect_chain[0][0])
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class ShoppingcartViewsClosedEnrollment(ModuleStoreTestCase):
"""
Test suite for ShoppingcartViews Course Enrollments Closed or not
"""
def setUp(self):
super(ShoppingcartViewsClosedEnrollment, self).setUp()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.instructor = AdminFactory.create()
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
self.course_mode = CourseMode(
course_id=self.course_key,
mode_slug=CourseMode.HONOR,
mode_display_name="honor cert",
min_price=self.cost
)
self.course_mode.save()
self.testing_course = CourseFactory.create(
org='Edx',
number='999',
display_name='Testing Super Course',
metadata={"invitation_only": False}
)
self.testing_course_mode = CourseMode(
course_id=self.testing_course.id,
mode_slug=CourseMode.HONOR,
mode_display_name="honor cert",
min_price=self.cost
)
self.course_mode.save()
self.percentage_discount = 20.0
self.coupon_code = 'asdsad'
self.course_mode = CourseMode(course_id=self.testing_course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
self.now = datetime.now(pytz.UTC)
self.tomorrow = self.now + timedelta(days=1)
self.nextday = self.tomorrow + timedelta(days=1)
def add_coupon(self, course_key, is_active, code):
"""
add dummy coupon into models
"""
coupon = Coupon(code=code, description='testing code', course_id=course_key,
percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active)
coupon.save()
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
@patch('shoppingcart.views.render_to_response', render_mock)
def test_to_check_that_cart_item_enrollment_is_closed(self):
self.login_user()
reg_item1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
expired_course_item = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
# now add the same coupon code to the second course(testing_course)
self.add_coupon(self.testing_course.id, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=expired_course_item.course_id,
order=expired_course_item.order_id)
self.assertEqual(coupon_redemption.count(), 1)
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
# coupon redemption entry should also be deleted when the item is expired.
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
# now the redemption entry should be deleted from the table.
coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=expired_course_item.course_id,
order=expired_course_item.order_id)
self.assertEqual(coupon_redemption.count(), 0)
((template, context), _tmp) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item1, context['shoppingcart_items'][0])
self.assertEqual(1, len(context['shoppingcart_items']))
self.assertEqual(True, context['is_course_enrollment_closed'])
self.assertIn(self.testing_course.display_name, context['expired_course_names'])
def test_to_check_that_cart_item_enrollment_is_closed_when_clicking_the_payment_button(self):
self.login_user()
PaidCourseRegistration.add_to_order(
self.cart,
self.course_key,
mode_slug=self.course_mode.mode_slug
)
PaidCourseRegistration.add_to_order(
self.cart,
self.testing_course.id,
mode_slug=self.testing_course_mode.mode_slug
)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
resp = self.client.get(reverse('shoppingcart.views.verify_cart'))
self.assertEqual(resp.status_code, 200)
self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
self.assertIn('40.00', resp.content)
def test_is_enrollment_closed_when_order_type_is_business(self):
self.login_user()
self.cart.order_type = 'business'
self.cart.save()
PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=self.course_mode.mode_slug)
CourseRegCodeItem.add_to_order(self.cart, self.testing_course.id, 2, mode_slug=self.course_mode.mode_slug)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
resp = self.client.post(reverse('shoppingcart.views.billing_details'))
self.assertEqual(resp.status_code, 200)
self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
self.assertIn('40.00', resp.content)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class RegistrationCodeRedemptionCourseEnrollment(SharedModuleStoreTestCase):
"""
Test suite for RegistrationCodeRedemption Course Enrollments
"""
@classmethod
def setUpClass(cls):
super(RegistrationCodeRedemptionCourseEnrollment, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
cls.course_key = cls.course.id
def setUp(self, **kwargs):
super(RegistrationCodeRedemptionCourseEnrollment, self).setUp()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.cost = 40
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
def test_registration_redemption_post_request_ratelimited(self):
"""
Try (and fail) registration code redemption 30 times
        in a row against a non-existent registration code via POST requests
"""
cache.clear()
url = reverse('register_code_redemption', args=['asdasd'])
self.login_user()
for i in xrange(30): # pylint: disable=unused-variable
response = self.client.post(url)
self.assertEquals(response.status_code, 404)
        # then the rate limiter should kick in and return an HttpForbidden (403) response
response = self.client.post(url)
self.assertEquals(response.status_code, 403)
        # now advance the clock 5 minutes into the future to lift the rate limit
reset_time = datetime.now(UTC) + timedelta(seconds=300)
with freeze_time(reset_time):
response = self.client.post(url)
self.assertEquals(response.status_code, 404)
cache.clear()
def test_registration_redemption_get_request_ratelimited(self):
"""
Try (and fail) registration code redemption 30 times
        in a row against a non-existent registration code via GET requests
"""
cache.clear()
url = reverse('register_code_redemption', args=['asdasd'])
self.login_user()
for i in xrange(30): # pylint: disable=unused-variable
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
        # then the rate limiter should kick in and return an HttpForbidden (403) response
response = self.client.get(url)
self.assertEquals(response.status_code, 403)
        # now advance the clock 5 minutes into the future to lift the rate limit
reset_time = datetime.now(UTC) + timedelta(seconds=300)
with freeze_time(reset_time):
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
cache.clear()
def test_course_enrollment_active_registration_code_redemption(self):
"""
Test for active registration code course enrollment
"""
cache.clear()
instructor = InstructorFactory(course_key=self.course_key)
self.client.login(username=instructor.username, password='test')
# Registration Code Generation only available to Sales Admins.
CourseSalesAdminRole(self.course.id).add_users(instructor)
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data)
self.assertEquals(response.status_code, 200)
# get the first registration from the newly created registration codes
registration_code = CourseRegistrationCode.objects.all()[0].code
redeem_url = reverse('register_code_redemption', args=[registration_code])
self.login_user()
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertIn('Activate Course Enrollment', response.content)
        # now activate the user by enrolling them in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.assertIn('View Dashboard', response.content)
        # now check that the registration code has already been redeemed and the user is already registered in the course
RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)
response = self.client.get(redeem_url)
self.assertEquals(len(RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)), 1)
self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
#now check that the registration code has already been redeemed
response = self.client.post(redeem_url)
self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
#now check the response of the dashboard page
dashboard_url = reverse('dashboard')
response = self.client.get(dashboard_url)
self.assertEquals(response.status_code, 200)
self.assertIn(self.course.display_name, response.content)
@ddt.ddt
class RedeemCodeEmbargoTests(UrlResetMixin, ModuleStoreTestCase):
"""Test blocking redeem code redemption based on country access rules. """
USERNAME = 'bob'
PASSWORD = 'test'
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(RedeemCodeEmbargoTests, self).setUp('embargo')
self.course = CourseFactory.create()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.user.username, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
@ddt.data('get', 'post')
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_registration_code_redemption_embargo(self, method):
# Create a valid registration code
reg_code = CourseRegistrationCode.objects.create(
code="abcd1234",
course_id=self.course.id,
created_by=self.user
)
# Try to redeem the code from a restricted country
with restrict_course(self.course.id) as redirect_url:
url = reverse(
'register_code_redemption',
kwargs={'registration_code': 'abcd1234'}
)
response = getattr(self.client, method)(url)
self.assertRedirects(response, redirect_url)
# The registration code should NOT be redeemed
is_redeemed = RegistrationCodeRedemption.objects.filter(
registration_code=reg_code
).exists()
self.assertFalse(is_redeemed)
# The user should NOT be enrolled
is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id)
self.assertFalse(is_enrolled)
@ddt.ddt
class DonationViewTest(SharedModuleStoreTestCase):
"""Tests for making a donation.
These tests cover both the single-item purchase flow,
as well as the receipt page for donation items.
"""
DONATION_AMOUNT = "23.45"
PASSWORD = "password"
@classmethod
def setUpClass(cls):
super(DonationViewTest, cls).setUpClass()
cls.course = CourseFactory.create(display_name="Test Course")
def setUp(self):
"""Create a test user and order. """
super(DonationViewTest, self).setUp()
# Create and login a user
self.user = UserFactory.create()
self.user.set_password(self.PASSWORD)
self.user.save()
result = self.client.login(username=self.user.username, password=self.PASSWORD)
self.assertTrue(result)
# Enable donations
config = DonationConfiguration.current()
config.enabled = True
config.save()
def test_donation_for_org(self):
self._donate(self.DONATION_AMOUNT)
self._assert_receipt_contains("tax purposes")
def test_donation_for_course_receipt(self):
# Donate to our course
self._donate(self.DONATION_AMOUNT, course_id=self.course.id)
# Verify the receipt page
self._assert_receipt_contains("tax purposes")
self._assert_receipt_contains(self.course.display_name)
def test_smallest_possible_donation(self):
self._donate("0.01")
self._assert_receipt_contains("0.01")
@ddt.data(
{},
{"amount": "abcd"},
{"amount": "-1.00"},
{"amount": "0.00"},
{"amount": "0.001"},
{"amount": "0"},
{"amount": "23.45", "course_id": "invalid"}
)
def test_donation_bad_request(self, bad_params):
response = self.client.post(reverse('donation'), bad_params)
self.assertEqual(response.status_code, 400)
def test_donation_requires_login(self):
self.client.logout()
response = self.client.post(reverse('donation'), {'amount': self.DONATION_AMOUNT})
self.assertEqual(response.status_code, 302)
def test_no_such_course(self):
response = self.client.post(
reverse("donation"),
{"amount": self.DONATION_AMOUNT, "course_id": "edx/DemoX/Demo"}
)
self.assertEqual(response.status_code, 400)
@ddt.data("get", "put", "head", "options", "delete")
def test_donation_requires_post(self, invalid_method):
response = getattr(self.client, invalid_method)(
reverse("donation"), {"amount": self.DONATION_AMOUNT}
)
self.assertEqual(response.status_code, 405)
def test_donations_disabled(self):
config = DonationConfiguration.current()
config.enabled = False
config.save()
# Logged in -- should be a 404
response = self.client.post(reverse('donation'))
self.assertEqual(response.status_code, 404)
# Logged out -- should still be a 404
self.client.logout()
response = self.client.post(reverse('donation'))
self.assertEqual(response.status_code, 404)
def _donate(self, donation_amount, course_id=None):
"""Simulate a donation to a course.
This covers the entire payment flow, except for the external
payment processor, which is simulated.
Arguments:
donation_amount (unicode): The amount the user is donating.
Keyword Arguments:
course_id (CourseKey): If provided, make a donation to the specific course.
Raises:
AssertionError
"""
# Purchase a single donation item
# Optionally specify a particular course for the donation
params = {'amount': donation_amount}
if course_id is not None:
params['course_id'] = course_id
url = reverse('donation')
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# Use the fake payment implementation to simulate the parameters
# we would receive from the payment processor.
payment_info = json.loads(response.content)
self.assertEqual(payment_info["payment_url"], "/shoppingcart/payment_fake")
# If this is a per-course donation, verify that we're sending
# the course ID to the payment processor.
if course_id is not None:
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data1"],
unicode(course_id)
)
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data2"],
"donation_course"
)
else:
self.assertEqual(payment_info["payment_params"]["merchant_defined_data1"], "")
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data2"],
"donation_general"
)
processor_response_params = PaymentFakeView.response_post_params(payment_info["payment_params"])
# Use the response parameters to simulate a successful payment
url = reverse('shoppingcart.views.postpay_callback')
response = self.client.post(url, processor_response_params)
self.assertRedirects(response, self._receipt_url)
def _assert_receipt_contains(self, expected_text):
"""Load the receipt page and verify that it contains the expected text."""
resp = self.client.get(self._receipt_url)
self.assertContains(resp, expected_text)
@property
def _receipt_url(self):
order_id = Order.objects.get(user=self.user, status="purchased").id
return reverse("shoppingcart.views.show_receipt", kwargs={"ordernum": order_id})
class CSVReportViewsTest(SharedModuleStoreTestCase):
"""
Test suite for CSV Purchase Reporting
"""
@classmethod
def setUpClass(cls):
super(CSVReportViewsTest, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
cls.course_key = cls.course.id
verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course')
cls.verified_course_key = verified_course.id
def setUp(self):
super(CSVReportViewsTest, self).setUp()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.cost = 40
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.course_mode2 = CourseMode(course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost)
self.course_mode2.save()
self.cart = Order.get_cart_for_user(self.user)
self.dl_grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
self.dl_grp.save()
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
def add_to_download_group(self, user):
"""
Helper fn to add self.user to group that's allowed to download report CSV
"""
user.groups.add(self.dl_grp)
def test_report_csv_no_access(self):
self.login_user()
response = self.client.get(reverse('payment_csv_report'))
self.assertEqual(response.status_code, 403)
def test_report_csv_bad_method(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.put(reverse('payment_csv_report'))
self.assertEqual(response.status_code, 400)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_report_csv_get(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.get(reverse('payment_csv_report'))
((template, context), unused_kwargs) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/download_report.html')
self.assertFalse(context['total_count_error'])
self.assertFalse(context['date_fmt_error'])
self.assertIn("Download CSV Reports", response.content.decode('UTF-8'))
@patch('shoppingcart.views.render_to_response', render_mock)
def test_report_csv_bad_date(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': 'BAD', 'end_date': 'BAD', 'requested_report': 'itemized_purchase_report'})
((template, context), unused_kwargs) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/download_report.html')
self.assertFalse(context['total_count_error'])
self.assertTrue(context['date_fmt_error'])
self.assertIn("There was an error in your date input. It should be formatted as YYYY-MM-DD",
response.content.decode('UTF-8'))
def test_report_csv_itemized(self):
report_type = 'itemized_purchase_report'
start_date = '1970-01-01'
end_date = '2100-01-01'
PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=self.course_mode.mode_slug)
self.cart.purchase()
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date,
'end_date': end_date,
'requested_report': report_type})
self.assertEqual(response['Content-Type'], 'text/csv')
report = initialize_report(report_type, start_date, end_date)
self.assertIn(",".join(report.header()), response.content)
self.assertIn(
",1,purchased,1,40.00,40.00,usd,Registration for Course: Robot Super Course,",
response.content
)
def test_report_csv_university_revenue_share(self):
report_type = 'university_revenue_share'
start_date = '1970-01-01'
end_date = '2100-01-01'
start_letter = 'A'
end_letter = 'Z'
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date,
'end_date': end_date,
'start_letter': start_letter,
'end_letter': end_letter,
'requested_report': report_type})
self.assertEqual(response['Content-Type'], 'text/csv')
report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)
self.assertIn(",".join(report.header()), response.content)
class UtilFnsTest(TestCase):
"""
Tests for utility functions in views.py
"""
def setUp(self):
super(UtilFnsTest, self).setUp()
self.user = UserFactory.create()
def test_can_download_report_no_group(self):
"""
Group controlling perms is not present
"""
self.assertFalse(_can_download_report(self.user))
def test_can_download_report_not_member(self):
"""
User is not part of group controlling perms
"""
Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP).save()
self.assertFalse(_can_download_report(self.user))
def test_can_download_report(self):
"""
User is part of group controlling perms
"""
grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
grp.save()
self.user.groups.add(grp)
self.assertTrue(_can_download_report(self.user))
def test_get_date_from_str(self):
test_str = "2013-10-01"
date = _get_date_from_str(test_str)
self.assertEqual(2013, date.year)
self.assertEqual(10, date.month)
self.assertEqual(1, date.day)
|
agpl-3.0
|
Onager/plaso
|
plaso/analysis/interface.py
|
1
|
2822
|
# -*- coding: utf-8 -*-
"""This file contains the interface for analysis plugins."""
import abc
import calendar
import collections
import time
from plaso.analysis import definitions as analysis_definitions
from plaso.analysis import logger
from plaso.containers import events
from plaso.containers import reports
from plaso.lib import definitions
class AnalysisPlugin(object):
"""Class that defines the analysis plugin interface."""
# The name of the plugin. This is the name that is matched against when
# loading plugins, so it is important that this name is short, concise and
# explains the nature of the plugin easily. It also needs to be unique.
NAME = 'analysis_plugin'
def __init__(self):
"""Initializes an analysis plugin."""
super(AnalysisPlugin, self).__init__()
self._analysis_counter = collections.Counter()
self.plugin_type = analysis_definitions.PLUGIN_TYPE_REPORT
@property
def plugin_name(self):
"""str: name of the plugin."""
return self.NAME
def _CreateEventTag(self, event, labels):
"""Creates an event tag.
Args:
event (EventObject): event to tag.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
"""
event_identifier = event.GetIdentifier()
event_tag = events.EventTag()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Tagged event: {0:s} with labels: {1:s}'.format(
event_identifier_string, ', '.join(labels)))
return event_tag
# pylint: disable=unused-argument
def CompileReport(self, mediator):
"""Compiles a report of the analysis.
After the plugin has received every copy of an event to analyze this
function will be called so that the report can be assembled.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
analysis_report = reports.AnalysisReport(plugin_name=self.NAME)
time_elements = time.gmtime()
time_compiled = calendar.timegm(time_elements)
analysis_report.time_compiled = (
time_compiled * definitions.MICROSECONDS_PER_SECOND)
analysis_report.analysis_counter = self._analysis_counter
return analysis_report
@abc.abstractmethod
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
|
apache-2.0
|
UniversalMasterEgg8679/ansible
|
lib/ansible/plugins/action/fail.py
|
227
|
1391
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Fail with custom message '''
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
msg = 'Failed as requested from task'
if self._task.args and 'msg' in self._task.args:
msg = self._task.args.get('msg')
result['failed'] = True
result['msg'] = msg
return result
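# Hedged usage note (added, not part of the original plugin): in a playbook
# this action backs the `fail` module and is usually guarded by `when`, e.g.:
#
#   - name: abort on unsupported platform
#     fail:
#       msg: "This play only supports Debian."
#     when: ansible_os_family != "Debian"
#
# Without `msg`, the task fails with the default
# 'Failed as requested from task' message set in run() above.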
|
gpl-3.0
|
pentestfail/TA-FireEye_TAP
|
bin/ta_fireeye_tap/solnlib/packages/requests/packages/chardet/escsm.py
|
2930
|
7839
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
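# Added note (hedged, not in the original file): each *SMModel dict is meant
# to be consumed by chardet's CodingStateMachine, which maps a byte to a
# character class via 'classTable', then indexes 'stateTable' with
# current_state * 'classFactor' + byte_class until eItsMe or eError is
# reached. A minimal sketch, assuming the chardet 2.x API:
#
#   from .codingstatemachine import CodingStateMachine
#   sm = CodingStateMachine(HZSMModel)
#   for byte in data:
#       if sm.next_state(byte) == eItsMe:
#           break  # the stream matches HZ-GB-2312 escape sequences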
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
|
mit
|
dsbrown/FreeCAD
|
src/Gui/FreeCADGuiInit.py
|
9
|
5995
|
# FreeCAD gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
# imports the one and only
import FreeCAD, FreeCADGui
# shortcuts
Gui = FreeCADGui
# Important definitions
class Workbench:
"""The workbench base class."""
MenuText = ""
ToolTip = ""
def Initialize(self):
"""Initializes this workbench."""
App.PrintWarning(str(self) + ": Workbench.Initialize() not implemented in subclass!")
def ContextMenu(self, recipient):
pass
def appendToolbar(self,name,cmds):
self.__Workbench__.appendToolbar(name, cmds)
def removeToolbar(self,name):
self.__Workbench__.removeToolbar(name)
def appendCommandbar(self,name,cmds):
self.__Workbench__.appendCommandbar(name, cmds)
def removeCommandbar(self,name):
self.__Workbench__.removeCommandbar(name)
def appendMenu(self,name,cmds):
self.__Workbench__.appendMenu(name, cmds)
def removeMenu(self,name):
self.__Workbench__.removeMenu(name)
def listMenus(self):
return self.__Workbench__.listMenus()
def appendContextMenu(self,name,cmds):
self.__Workbench__.appendContextMenu(name, cmds)
def removeContextMenu(self,name):
self.__Workbench__.removeContextMenu(name)
def name(self):
return self.__Workbench__.name()
def GetClassName(self):
"""Return the name of the associated C++ class."""
# as default use this to simplify writing workbenches in Python
return "Gui::PythonWorkbench"
class StandardWorkbench ( Workbench ):
"""A workbench defines the tool bars, command bars, menus,
context menu and dockable windows of the main window.
"""
def Initialize(self):
"""Initialize this workbench."""
# load the module
Log ('Init: Loading FreeCAD GUI\n')
def GetClassName(self):
"""Return the name of the associated C++ class."""
return "Gui::StdWorkbench"
class NoneWorkbench ( Workbench ):
"""An empty workbench."""
MenuText = "<none>"
ToolTip = "The default empty workbench"
def Initialize(self):
"""Initialize this workbench."""
# load the module
Log ('Init: Loading FreeCAD GUI\n')
def GetClassName(self):
"""Return the name of the associated C++ class."""
return "Gui::NoneWorkbench"
def InitApplications():
import sys,os
# Searching modules dirs +++++++++++++++++++++++++++++++++++++++++++++++++++
# (additional module paths are already cached)
ModDirs = FreeCAD.__path__
#print ModDirs
Log('Init: Searching modules...\n')
for Dir in ModDirs:
if ((Dir != '') & (Dir != 'CVS') & (Dir != '__init__.py')):
InstallFile = os.path.join(Dir,"InitGui.py")
if (os.path.exists(InstallFile)):
try:
#execfile(InstallFile)
exec open(InstallFile).read()
except Exception, inst:
Log('Init: Initializing ' + Dir + '... failed\n')
Err('During initialization the error ' + str(inst) + ' occurred in ' + InstallFile + '\n')
else:
Log('Init: Initializing ' + Dir + '... done\n')
else:
Log('Init: Initializing ' + Dir + '(InitGui.py not found)... ignore\n')
Log ('Init: Running FreeCADGuiInit.py start script...\n')
# init the gui
# signal that the gui is up
App.GuiUp = 1
App.Gui = FreeCADGui
Gui.addWorkbench(NoneWorkbench())
# init modules
InitApplications()
# set standard workbench (needed as fallback)
Gui.activateWorkbench("NoneWorkbench")
# Register .py, .FCScript and .FCMacro
FreeCAD.addImportType("Inventor V2.1 (*.iv)","FreeCADGui")
FreeCAD.addImportType("VRML V2.0 (*.wrl *.vrml *.wrz *.wrl.gz)","FreeCADGui")
FreeCAD.addImportType("Python (*.py *.FCMacro *.FCScript)","FreeCADGui")
FreeCAD.addExportType("Inventor V2.1 (*.iv)","FreeCADGui")
FreeCAD.addExportType("VRML V2.0 (*.wrl *.vrml *.wrz *.wrl.gz)","FreeCADGui")
#FreeCAD.addExportType("IDTF (for 3D PDF) (*.idtf)","FreeCADGui")
#FreeCAD.addExportType("3D View (*.svg)","FreeCADGui")
FreeCAD.addExportType("Portable Document Format (*.pdf)","FreeCADGui")
del(InitApplications)
del(NoneWorkbench)
del(StandardWorkbench)
Log ('Init: Running FreeCADGuiInit.py start script... done\n')
|
lgpl-2.1
|
radlab/sparrow
|
deploy/third_party/boto-2.1.1/boto/vpc/dhcpoptions.py
|
20
|
2485
|
# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a DHCP Options set
"""
from boto.ec2.ec2object import TaggedEC2Object
class DhcpValueSet(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'value':
self.append(value)
class DhcpConfigSet(dict):
def startElement(self, name, attrs, connection):
if name == 'valueSet':
if not self.has_key(self._name):
self[self._name] = DhcpValueSet()
return self[self._name]
def endElement(self, name, value, connection):
if name == 'key':
self._name = value
class DhcpOptions(TaggedEC2Object):
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
self.options = None
def __repr__(self):
return 'DhcpOptions:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
if retval is not None:
return retval
if name == 'dhcpConfigurationSet':
self.options = DhcpConfigSet()
return self.options
def endElement(self, name, value, connection):
if name == 'dhcpOptionsId':
self.id = value
else:
setattr(self, name, value)
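# A rough sketch (an assumption based on the shape of an EC2 DescribeDhcpOptions
# response, not taken from boto's documentation) of the XML these handlers walk:
#
#   <dhcpConfigurationSet>
#     <item>
#       <key>domain-name</key>
#       <valueSet><item><value>example.com</value></item></valueSet>
#     </item>
#   </dhcpConfigurationSet>
#
# DhcpConfigSet.endElement remembers each <key>, its startElement attaches a
# DhcpValueSet when the matching <valueSet> opens, and DhcpValueSet.endElement
# appends every <value>, so the parsed DhcpOptions.options would map
# 'domain-name' to ['example.com'].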
|
apache-2.0
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/fileinput.py
|
4
|
9897
|
"""Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode. If an I/O error occurs during
opening or reading a file, the IOError exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- specify open mode ('r' or 'rb')
- fileno()
- isatty()
- read(), read(size), even readlines()
"""
import sys, os, stat
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0):
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize)
return _state
def close():
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def isfirstline():
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
def __init__(self, files=None, inplace=0, backup="", bufsize=0):
if type(files) == type(''):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = 0
self._backupfilename = None
self._buffer = []
self._bufindex = 0
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __getitem__(self, i):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
line = self.readline()
if not line:
raise IndexError, "end of input reached"
return line
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except: pass
self._isstdin = 0
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = 0
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = 1
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or ".bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, "r")
try:
perm = os.fstat(self._file.fileno())[stat.ST_MODE]
except:
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
os.chmod(self._filename, perm)
except:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
self._file = open(self._filename, "r")
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
|
mit
|
michael-dev2rights/ansible
|
test/units/module_utils/basic/test_no_log.py
|
60
|
6904
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
import sys
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils import basic
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.module_utils.basic import return_values, remove_values
class TestReturnValues(unittest.TestCase):
dataset = (
('string', frozenset(['string'])),
('', frozenset()),
(1, frozenset(['1'])),
(1.0, frozenset(['1.0'])),
(False, frozenset()),
(['1', '2', '3'], frozenset(['1', '2', '3'])),
(('1', '2', '3'), frozenset(['1', '2', '3'])),
({'one': 1, 'two': 'dos'}, frozenset(['1', 'dos'])),
(
{
'one': 1,
'two': 'dos',
'three': [
'amigos', 'musketeers', None, {
'ping': 'pong',
'base': (
'balls', 'raquets'
)
}
]
},
frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])
),
(u'Toshio くらとみ', frozenset(['Toshio くらとみ'])),
('Toshio くらとみ', frozenset(['Toshio くらとみ'])),
)
def test_return_values(self):
for data, expected in self.dataset:
self.assertEquals(frozenset(return_values(data)), expected)
def test_unknown_type(self):
self.assertRaises(TypeError, frozenset, return_values(object()))
class TestRemoveValues(unittest.TestCase):
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset_no_remove = (
('string', frozenset(['nope'])),
(1234, frozenset(['4321'])),
(False, frozenset(['4321'])),
(1.0, frozenset(['4321'])),
(['string', 'strang', 'strung'], frozenset(['nope'])),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['nope'])),
(
{
'one': 1,
'two': 'dos',
'three': [
'amigos', 'musketeers', None, {
'ping': 'pong', 'base': ['balls', 'raquets']
}
]
},
frozenset(['nope'])
),
(u'Toshio くら'.encode('utf-8'), frozenset([u'とみ'.encode('utf-8')])),
(u'Toshio くら', frozenset([u'とみ'])),
)
dataset_remove = (
('string', frozenset(['string']), OMIT),
(1234, frozenset(['1234']), OMIT),
(1234, frozenset(['23']), OMIT),
(1.0, frozenset(['1.0']), OMIT),
(['string', 'strang', 'strung'], frozenset(['strang']), ['string', OMIT, 'strung']),
(['string', 'strang', 'strung'], frozenset(['strang', 'string', 'strung']), [OMIT, OMIT, OMIT]),
(('string', 'strang', 'strung'), frozenset(['string', 'strung']), [OMIT, 'strang', OMIT]),
((1234567890, 345678, 987654321), frozenset(['1234567890']), [OMIT, 345678, 987654321]),
((1234567890, 345678, 987654321), frozenset(['345678']), [OMIT, OMIT, 987654321]),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key']), {'one': 1, 'two': 'dos', 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
(
{
'one': 1,
'two': 'dos',
'three': [
'amigos', 'musketeers', None, {
'ping': 'pong', 'base': [
'balls', 'raquets'
]
}
]
},
frozenset(['balls', 'base', 'pong', 'amigos']),
{
'one': 1,
'two': 'dos',
'three': [
OMIT, 'musketeers', None, {
'ping': OMIT,
'base': [
OMIT, 'raquets'
]
}
]
}
),
(
'This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery',
frozenset(['enigma', 'mystery', 'secret']),
'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'
),
(u'Toshio くらとみ'.encode('utf-8'), frozenset([u'くらとみ'.encode('utf-8')]), u'Toshio ********'.encode('utf-8')),
(u'Toshio くらとみ', frozenset([u'くらとみ']), u'Toshio ********'),
)
def test_no_removal(self):
for value, no_log_strings in self.dataset_no_remove:
self.assertEquals(remove_values(value, no_log_strings), value)
def test_strings_to_remove(self):
for value, no_log_strings, expected in self.dataset_remove:
self.assertEquals(remove_values(value, no_log_strings), expected)
def test_unknown_type(self):
self.assertRaises(TypeError, remove_values, object(), frozenset())
def test_hit_recursion_limit(self):
""" Check that we do not hit a recursion limit"""
data_list = []
inner_list = data_list
for i in range(0, 10000):
new_list = []
inner_list.append(new_list)
inner_list = new_list
inner_list.append('secret')
# Check that this does not hit a recursion limit
actual_data_list = remove_values(data_list, frozenset(('secret',)))
levels = 0
inner_list = actual_data_list
while inner_list:
if isinstance(inner_list, list):
self.assertEquals(len(inner_list), 1)
else:
levels -= 1
break
inner_list = inner_list[0]
levels += 1
self.assertEquals(inner_list, self.OMIT)
self.assertEquals(levels, 10000)
|
gpl-3.0
|
eleonrk/SickRage
|
lib/sqlalchemy/testing/suite/test_insert.py
|
76
|
6861
|
from .. import fixtures, config
from ..config import requirements
from .. import exclusions
from ..assertions import eq_
from .. import engines
from sqlalchemy import Integer, String, select, util
from ..schema import Table, Column
class LastrowidTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
__requires__ = 'implements_get_lastrowid', 'autoincrement_insert'
__engine_options__ = {"implicit_returning": False}
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
def test_autoincrement_on_insert(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
# failed on pypy1.9 but seems to be OK on pypy 2.1
#@exclusions.fails_if(lambda: util.pypy, "lastrowid not maintained after "
# "connection close")
@requirements.dbapi_lastrowid
def test_native_lastrowid_autoinc(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
lastrowid = r.lastrowid
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
lastrowid, pk
)
class InsertBehaviorTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True, \
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
def test_autoclose_on_insert(self):
if requirements.returning.enabled:
engine = engines.testing_engine(
options={'implicit_returning': False})
else:
engine = config.db
r = engine.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.returning
def test_autoclose_on_insert_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.empty_inserts
def test_empty_insert(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
)
assert r.closed
r = config.db.execute(
self.tables.autoinc_pk.select().\
where(self.tables.autoinc_pk.c.id != None)
)
assert len(r.fetchall())
@requirements.insert_from_select
def test_insert_from_select(self):
table = self.tables.manual_pk
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
]
)
config.db.execute(
table.insert(inline=True).
from_select(
("id", "data",), select([table.c.id + 5, table.c.data]).where(
table.c.data.in_(["data2", "data3"]))
),
)
eq_(
config.db.execute(
select([table.c.data]).order_by(table.c.data)
).fetchall(),
[("data1", ), ("data2", ), ("data2", ),
("data3", ), ("data3", )]
)
class ReturningTest(fixtures.TablesTest):
run_create_tables = 'each'
__requires__ = 'returning', 'autoincrement_insert'
__backend__ = True
__engine_options__ = {"implicit_returning": True}
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True, \
test_needs_autoincrement=True),
Column('data', String(50))
)
@requirements.fetch_rows_post_commit
def test_explicit_returning_pk_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
r = engine.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_explicit_returning_pk_no_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
with engine.begin() as conn:
r = conn.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_autoincrement_on_insert_implcit_returning(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest')
|
gpl-3.0
|
karras/gitlab-docsbot
|
setup.py
|
1
|
2639
|
# -*- coding: UTF-8 -*-
"""Setuptools package definition"""
from setuptools import setup
from setuptools import find_packages
import codecs
import os
import sys
version = sys.version_info[0]
if version > 2:
pass
else:
pass
__version__ = None
version_file = "autodocs/version.py"
with codecs.open(version_file, encoding="UTF-8") as f:
code = compile(f.read(), version_file, 'exec')
exec(code)
def find_data(packages, extensions):
"""Finds data files along with source.
:param packages: Look in these packages
:param extensions: Look for these extensions
"""
data = {}
for package in packages:
package_path = package.replace('.', '/')
for dirpath, _, filenames in os.walk(package_path):
for filename in filenames:
for extension in extensions:
if filename.endswith(".%s" % extension):
file_path = os.path.join(
dirpath,
filename
)
file_path = file_path[len(package) + 1:]
if package not in data:
data[package] = []
data[package].append(file_path)
return data
with codecs.open('README.md', 'r', encoding="UTF-8") as f:
README_TEXT = f.read()
setup(
name = "gitlab-autodocs",
version = __version__,
packages = find_packages(),
package_data=find_data(
find_packages(), ["py"]
),
data_files = [
('/etc', ['autodocs/config/gitlab-autodocs.yaml']),
('/lib/systemd/system', ['autodocs/config/gitlab-autodocs.service'])
],
entry_points = {
'console_scripts': [
'gitlab-autodocs = autodocs:main',
]
},
install_requires = [
"requests",
"pyyaml",
"python-gitlab"
],
author = "Adfinis SyGroup AG",
author_email = "https://adfinis-sygroup.ch/",
description = "GitLab CI Docs Bot",
long_description = README_TEXT,
keywords = "GitLab CI autodocs bot",
url = "https://github.com/karras/gitlab-docsbot",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Topic :: Software Development :: Build Tools"
]
)
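# For illustration only: find_data(find_packages(), ["py", "yaml"]) maps each
# package name to data-file paths relative to that package, e.g. something like
# {'autodocs': ['config/gitlab-autodocs.yaml', ...]} if the YAML config ships
# inside the package (the exact contents depend on the working tree).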
|
gpl-3.0
|
paran0ids0ul/infernal-twin
|
build/pip/build/lib.linux-i686-2.7/pip/_vendor/progress/bar.py
|
404
|
2707
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
width = 32
message = ''
suffix = '%(index)d/%(max)d'
bar_prefix = ' |'
bar_suffix = '| '
empty_fill = ' '
fill = '#'
hide_cursor = True
def update(self):
filled_length = int(self.width * self.progress)
empty_length = self.width - filled_length
message = self.message % self
bar = self.fill * filled_length
empty = self.empty_fill * empty_length
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
suffix])
self.writeln(line)
class ChargingBar(Bar):
suffix = '%(percent)d%%'
bar_prefix = ' '
bar_suffix = ' '
empty_fill = '∙'
fill = '█'
class FillingSquaresBar(ChargingBar):
empty_fill = '▢'
fill = '▣'
class FillingCirclesBar(ChargingBar):
empty_fill = '◯'
fill = '◉'
class IncrementalBar(Bar):
phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
def update(self):
nphases = len(self.phases)
expanded_length = int(nphases * self.width * self.progress)
filled_length = int(self.width * self.progress)
empty_length = self.width - filled_length
phase = expanded_length - (filled_length * nphases)
message = self.message % self
bar = self.phases[-1] * filled_length
current = self.phases[phase] if phase > 0 else ''
empty = self.empty_fill * max(0, empty_length - len(current))
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, current, empty,
self.bar_suffix, suffix])
self.writeln(line)
class ShadyBar(IncrementalBar):
phases = (' ', '░', '▒', '▓', '█')
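# A minimal usage sketch, assuming the next()/finish() helpers that the
# Progress base class and WritelnMixin provide in the upstream "progress"
# package:
#
#   bar = IncrementalBar('Copying', max=200)
#   for _ in range(200):
#       # ... one unit of work ...
#       bar.next()
#   bar.finish()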
|
gpl-3.0
|
wangcy6/storm_app
|
frame/c++/grpc-master/src/python/grpcio_tests/tests/unit/_metadata_test.py
|
12
|
7977
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests server and client side metadata API."""
import unittest
import weakref
import grpc
from grpc import _channel
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit.framework.common import test_constants
_CHANNEL_ARGS = (('grpc.primary_user_agent', 'primary-agent'),
('grpc.secondary_user_agent', 'secondary-agent'))
_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x00\x00\x00'
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
('client-md-key-bin', b'\x00\x01'))
_SERVER_INITIAL_METADATA = (
('server-initial-md-key', 'server-initial-md-value'),
('server-initial-md-key-bin', b'\x00\x02'))
_SERVER_TRAILING_METADATA = (
('server-trailing-md-key', 'server-trailing-md-value'),
('server-trailing-md-key-bin', b'\x00\x03'))
def user_agent(metadata):
for key, val in metadata:
if key == 'user-agent':
return val
raise KeyError('No user agent!')
def validate_client_metadata(test, servicer_context):
test.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, servicer_context.invocation_metadata()))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
.startswith('primary-agent ' + _channel._USER_AGENT))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
.endswith('secondary-agent'))
def handle_unary_unary(test, request, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
return _RESPONSE
def handle_unary_stream(test, request, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
for _ in range(test_constants.STREAM_LENGTH):
yield _RESPONSE
def handle_stream_unary(test, request_iterator, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(issue:#6891) We should be able to remove this loop
for request in request_iterator:
pass
return _RESPONSE
def handle_stream_stream(test, request_iterator, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(issue:#6891) We should be able to remove this loop,
# and replace with return; yield
for request in request_iterator:
yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, test, request_streaming, response_streaming):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
self.stream_stream = lambda x, y: handle_stream_stream(test, x, y)
elif self.request_streaming:
self.stream_unary = lambda x, y: handle_stream_unary(test, x, y)
elif self.response_streaming:
self.unary_stream = lambda x, y: handle_unary_stream(test, x, y)
else:
self.unary_unary = lambda x, y: handle_unary_unary(test, x, y)
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, test):
self._test = test
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(self._test, False, False)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(self._test, False, True)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(self._test, True, False)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(self._test, True, True)
else:
return None
class MetadataTest(unittest.TestCase):
def setUp(self):
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(
self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._channel = grpc.insecure_channel(
'localhost:%d' % port, options=_CHANNEL_ARGS)
def tearDown(self):
self._server.stop(0)
def testUnaryUnary(self):
multi_callable = self._channel.unary_unary(_UNARY_UNARY)
unused_response, call = multi_callable.with_call(
_REQUEST, metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
def testUnaryStream(self):
multi_callable = self._channel.unary_stream(_UNARY_STREAM)
call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
def testStreamUnary(self):
multi_callable = self._channel.stream_unary(_STREAM_UNARY)
unused_response, call = multi_callable.with_call(
iter([_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
def testStreamStream(self):
multi_callable = self._channel.stream_stream(_STREAM_STREAM)
call = multi_callable(
iter([_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
call.initial_metadata()))
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
call.trailing_metadata()))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
apache-2.0
|
shhui/nova
|
nova/tests/api/openstack/compute/plugins/v3/test_flavor_rxtx.py
|
19
|
3365
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import flavor_rxtx
from nova.compute import flavors
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"rxtx_factor": '1.0',
"swap": 0,
"ephemeral_gb": 0,
"vcpus": 1,
"disabled": False,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '10',
"rxtx_factor": None,
"swap": 0,
"vcpus": 1,
"ephemeral_gb": 0,
"disabled": False,
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return [
fake_flavor_get_by_flavor_id(1),
fake_flavor_get_by_flavor_id(2)
]
class FlavorRxtxTest(test.NoDBTestCase):
content_type = 'application/json'
prefix = '%s:' % flavor_rxtx.ALIAS
def setUp(self):
super(FlavorRxtxTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
app = fakes.wsgi_app_v3(init_only=('servers', 'flavors',
'os-flavor-rxtx'))
res = req.get_response(app)
return res
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorRxtx(self, flavor, rxtx):
self.assertEqual(
flavor.get('%srxtx_factor' % self.prefix), rxtx)
def test_show(self):
url = '/v3/flavors/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
def test_detail(self):
url = '/v3/flavors/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorRxtx(flavors[0], '1.0')
self.assertFlavorRxtx(flavors[1], '')
|
apache-2.0
|
xuru/agar
|
agar/dates.py
|
5
|
2435
|
"""
The ``agar.dates`` module contains a function to help work with dates.
"""
import re
from datetime import datetime, timedelta
from pytz.gae import pytz
def parse_datetime(s):
"""
    Create a ``datetime`` object representing the date/time expressed in a string.
Takes a string in the format produced by calling ``str()``
on a python ``datetime`` object and returns a ``datetime``
instance that would produce that string.
Acceptable formats are:
* ``YYYY-MM-DD HH:MM:SS.ssssss+HH:MM``
* ``YYYY-MM-DD HH:MM:SS.ssssss``
* ``YYYY-MM-DD HH:MM:SS+HH:MM``
* ``YYYY-MM-DD HH:MM:SS``
* ``YYYY-MM-DD``
Where ``ssssss`` represents fractional seconds. The timezone
is optional and may be either positive or negative
hours/minutes east of UTC.
:param s: The string to parse into a ``datetime``.
:return: The ``datetime`` represented by the given string.
"""
if s is None:
return None
# Split string in the form 2007-06-18 19:39:25.3300-07:00
# into its constituent date/time, microseconds, and
# timezone fields where microseconds and timezone are
# optional.
m = re.match(r'(.*?)(?:\.(\d+))?(([-+]\d{1,2}):(\d{2}))?$', str(s))
datestr, fractional, tzname, tzhour, tzmin = m.groups()
# Create tzinfo object representing the timezone
# expressed in the input string. The names we give
# for the timezones are lame: they are just the offset
# from UTC (as it appeared in the input string). We
# handle UTC specially since it is a very common case
# and we know its name.
if tzname is None:
tz = None
else:
tzhour, tzmin = int(tzhour), int(tzmin)
if tzhour == tzmin == 0:
tzname = 'UTC'
tz = pytz.reference.FixedOffset(timedelta(hours=tzhour, minutes=tzmin), tzname)
# Convert the date/time field into a python datetime
# object.
try:
x = datetime.strptime(datestr, "%Y-%m-%d %H:%M:%S")
except ValueError:
x = datetime.strptime(datestr, "%Y-%m-%d")
# Convert the fractional second portion into a count
# of microseconds.
if fractional is None:
fractional = '0'
fracpower = 6 - len(fractional)
fractional = float(fractional) * (10 ** fracpower)
# Return updated datetime object with microseconds and
# timezone information.
return x.replace(microsecond=int(fractional), tzinfo=tz)
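# Illustrative calls (the values are chosen for this sketch, not taken from
# agar's tests):
#
#   parse_datetime("2007-06-18 19:39:25.3300-07:00")
#   # -> datetime(2007, 6, 18, 19, 39, 25, 330000, tzinfo=<UTC-07:00>)
#   parse_datetime("2015-03-20")
#   # -> datetime(2015, 3, 20, 0, 0)   (naive: the input carries no timezone)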
|
mit
|
zubair-arbi/edx-platform
|
cms/djangoapps/contentstore/git_export_utils.py
|
187
|
7401
|
"""
Utilities for exporting a course's XML into a git repository,
committing and pushing the changes.
"""
import logging
import os
import subprocess
from urlparse import urlparse
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_exporter import export_course_to_xml
log = logging.getLogger(__name__)
GIT_REPO_EXPORT_DIR = getattr(settings, 'GIT_REPO_EXPORT_DIR', None)
GIT_EXPORT_DEFAULT_IDENT = getattr(settings, 'GIT_EXPORT_DEFAULT_IDENT',
{'name': 'STUDIO_EXPORT_TO_GIT',
'email': 'STUDIO_EXPORT_TO_GIT@example.com'})
class GitExportError(Exception):
"""
Convenience exception class for git export error conditions.
"""
def __init__(self, message):
# Force the lazy i18n values to turn into actual unicode objects
super(GitExportError, self).__init__(unicode(message))
NO_EXPORT_DIR = _("GIT_REPO_EXPORT_DIR not set or path {0} doesn't exist, "
"please create it, or configure a different path with "
"GIT_REPO_EXPORT_DIR").format(GIT_REPO_EXPORT_DIR)
URL_BAD = _('Non writable git url provided. Expecting something like:'
' git@github.com:mitocw/edx4edx_lite.git')
URL_NO_AUTH = _('If using http urls, you must provide the username '
'and password in the url. Similar to '
'https://user:pass@github.com/user/course.')
DETACHED_HEAD = _('Unable to determine branch, repo in detached HEAD mode')
CANNOT_PULL = _('Unable to update or clone git repository.')
XML_EXPORT_FAIL = _('Unable to export course to xml.')
CONFIG_ERROR = _('Unable to configure git username and password')
CANNOT_COMMIT = _('Unable to commit changes. This is usually '
'because there are no changes to be committed')
CANNOT_PUSH = _('Unable to push changes. This is usually '
'because the remote repository cannot be contacted')
BAD_COURSE = _('Bad course location provided')
MISSING_BRANCH = _('Missing branch on fresh clone')
def cmd_log(cmd, cwd):
"""
Helper function to redirect stderr to stdout and log the command
used along with the output. Will raise subprocess.CalledProcessError if
command doesn't return 0, and returns the command's output.
"""
output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
log.debug('Command was: {0!r}. '
'Working directory was: {1!r}'.format(' '.join(cmd), cwd))
log.debug('Command output was: {0!r}'.format(output))
return output
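# For example, cmd_log(['git', 'status'], '/tmp/export') logs the command and
# its merged stdout/stderr and returns that output, raising
# subprocess.CalledProcessError on a non-zero exit (per check_output above).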
def export_to_git(course_id, repo, user='', rdir=None):
"""Export a course to git."""
# pylint: disable=too-many-statements
if not GIT_REPO_EXPORT_DIR:
raise GitExportError(GitExportError.NO_EXPORT_DIR)
if not os.path.isdir(GIT_REPO_EXPORT_DIR):
raise GitExportError(GitExportError.NO_EXPORT_DIR)
# Check for valid writable git url
if not (repo.endswith('.git') or
repo.startswith(('http:', 'https:', 'file:'))):
raise GitExportError(GitExportError.URL_BAD)
# Check for username and password if using http[s]
if repo.startswith('http:') or repo.startswith('https:'):
parsed = urlparse(repo)
if parsed.username is None or parsed.password is None:
raise GitExportError(GitExportError.URL_NO_AUTH)
if rdir:
rdir = os.path.basename(rdir)
else:
rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]
log.debug("rdir = %s", rdir)
# Pull or clone repo before exporting to xml
# and update url in case origin changed.
rdirp = '{0}/{1}'.format(GIT_REPO_EXPORT_DIR, rdir)
branch = None
if os.path.exists(rdirp):
log.info('Directory already exists, doing a git reset and pull '
'instead of git clone.')
cwd = rdirp
# Get current branch
cmd = ['git', 'symbolic-ref', '--short', 'HEAD']
try:
branch = cmd_log(cmd, cwd).strip('\n')
except subprocess.CalledProcessError as ex:
log.exception('Failed to get branch: %r', ex.output)
raise GitExportError(GitExportError.DETACHED_HEAD)
cmds = [
['git', 'remote', 'set-url', 'origin', repo],
['git', 'fetch', 'origin'],
['git', 'reset', '--hard', 'origin/{0}'.format(branch)],
['git', 'pull'],
['git', 'clean', '-d', '-f'],
]
else:
cmds = [['git', 'clone', repo]]
cwd = GIT_REPO_EXPORT_DIR
cwd = os.path.abspath(cwd)
for cmd in cmds:
try:
cmd_log(cmd, cwd)
except subprocess.CalledProcessError as ex:
log.exception('Failed to pull git repository: %r', ex.output)
raise GitExportError(GitExportError.CANNOT_PULL)
    # Export course as xml before committing and pushing
root_dir = os.path.dirname(rdirp)
course_dir = os.path.basename(rdirp).rsplit('.git', 1)[0]
try:
export_course_to_xml(modulestore(), contentstore(), course_id,
root_dir, course_dir)
except (EnvironmentError, AttributeError):
log.exception('Failed export to xml')
raise GitExportError(GitExportError.XML_EXPORT_FAIL)
# Get current branch if not already set
if not branch:
cmd = ['git', 'symbolic-ref', '--short', 'HEAD']
try:
branch = cmd_log(cmd, os.path.abspath(rdirp)).strip('\n')
except subprocess.CalledProcessError as ex:
log.exception('Failed to get branch from freshly cloned repo: %r',
ex.output)
raise GitExportError(GitExportError.MISSING_BRANCH)
# Now that we have fresh xml exported, set identity, add
# everything to git, commit, and push to the right branch.
ident = {}
try:
user = User.objects.get(username=user)
ident['name'] = user.username
ident['email'] = user.email
except User.DoesNotExist:
# That's ok, just use default ident
ident = GIT_EXPORT_DEFAULT_IDENT
time_stamp = timezone.now()
cwd = os.path.abspath(rdirp)
commit_msg = "Export from Studio at {time_stamp}".format(
time_stamp=time_stamp,
)
try:
cmd_log(['git', 'config', 'user.email', ident['email']], cwd)
cmd_log(['git', 'config', 'user.name', ident['name']], cwd)
except subprocess.CalledProcessError as ex:
log.exception('Error running git configure commands: %r', ex.output)
raise GitExportError(GitExportError.CONFIG_ERROR)
try:
cmd_log(['git', 'add', '.'], cwd)
cmd_log(['git', 'commit', '-a', '-m', commit_msg], cwd)
except subprocess.CalledProcessError as ex:
log.exception('Unable to commit changes: %r', ex.output)
raise GitExportError(GitExportError.CANNOT_COMMIT)
try:
cmd_log(['git', 'push', '-q', 'origin', branch], cwd)
except subprocess.CalledProcessError as ex:
log.exception('Error running git push command: %r', ex.output)
raise GitExportError(GitExportError.CANNOT_PUSH)
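# A minimal invocation sketch (the repository URL and username are placeholders):
#
#   export_to_git(course_id, 'git@github.com:org/course.git', user='staff')
#
# which clones or resets the target repo under GIT_REPO_EXPORT_DIR, exports the
# course XML into it, and commits and pushes on the repo's current branch.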
|
agpl-3.0
|
nubark/odoo
|
addons/survey/tests/test_survey.py
|
47
|
18040
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import re
from collections import Counter
from itertools import product
from urlparse import urljoin
from openerp import _
from openerp.exceptions import UserError
from openerp.tests.common import TransactionCase
from openerp.addons.website.models.website import slug
class TestSurvey(TransactionCase):
def setUp(self):
super(TestSurvey, self).setUp()
User = self.env['res.users'].with_context({'no_reset_password': True})
(group_survey_user, group_employee) = (self.ref('base.group_survey_user'), self.ref('base.group_user'))
self.survey_manager = User.create({
'name': 'Gustave Doré', 'login': 'Gustav', 'alias_name': 'gustav', 'email': 'gustav.dore@example.com',
'groups_id': [(6, 0, [self.ref('base.group_survey_manager'), group_survey_user, group_employee])]})
self.survey_user = User.create({
'name': 'Lukas Peeters', 'login': 'Lukas', 'alias_name': 'lukas', 'email': 'lukas.petters@example.com',
'groups_id': [(6, 0, [group_survey_user, group_employee])]})
self.user_public = User.create({
'name': 'Wout Janssens', 'login': 'Wout', 'alias_name': 'wout', 'email': 'wout.janssens@example.com',
'groups_id': [(6, 0, [self.ref('base.group_public')])]})
self.Partner = self.env['res.partner']
self.Survey = self.env['survey.survey']
self.Question = self.env['survey.question']
self.Stage = self.env['survey.stage']
self.SurveyMailMessage = self.env['survey.mail.compose.message']
self.UserInput = self.env['survey.user_input']
self.UserInputLine = self.env['survey.user_input_line']
self.survey1 = self.Survey.sudo(self.survey_manager).create({'title': "S0", 'page_ids': [(0, 0, {'title': "P0"})]})
self.page1 = self.survey1.page_ids[0]
def test_00_create_minimal_survey(self):
question = self.Question.sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
self.assertEqual(self.survey1.title, "S0", msg="Title of the survey is somehow modified.")
self.assertEqual(len(self.survey1.page_ids), 1, msg="Additional Pages are linked with the survey after creation.")
self.assertEqual(self.page1.title, "P0", msg="Title of the page is somehow modified.")
self.assertEqual(len(self.page1.question_ids), 1, msg="Additional questions are linked with the page after creation.")
self.assertEqual(question.question, "Q0", msg="Title of the Question is somehow modified.")
def test_01_question_type_validation_save_line_function(self):
for (question_type, text) in self.Question._columns['type'].selection:
            # Each question type must have a validation function.
self.assertTrue(hasattr(self.Question, 'validate_' + question_type), msg="Question must have a validation method in\
the form of 'validate_' followed by the name of the type.")
            # Survey input lines must have a save method for each question type.
self.assertTrue(hasattr(self.UserInputLine, 'save_line_' + question_type), msg="Inputline must have Save method in \
the form of 'save_line_' followed by the name of the type.")
def test_02_question_answer_required(self):
for (question_type, text) in self.Question._columns['type'].selection:
# Blank value of field is not accepted for mandatory questions.
if question_type == 'multiple_choice':
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'multiple_choice',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "MChoice0", "quizz_mark": 0}), (0, 0, {'value': "MChoice1", "quizz_mark": 0})]})
elif question_type == 'matrix':
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
else:
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': question_type, 'constr_mandatory': True, 'constr_error_msg': 'Error'})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
self.assertDictEqual({answer_tag: "Error"}, self.Question.validate_question(question, {answer_tag: ''}, answer_tag),
msg=("Validation function for type %s is unable to generate error if it is mandatory and answer is blank." % question_type))
def test_03_question_textbox(self):
questions = [
self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'textbox', 'validation_email': True}),
self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q1', 'type': 'textbox', 'validation_required': True,
'validation_length_min': 2, 'validation_length_max': 8, 'validation_error_msg': "Error"})]
results = [('test @ testcom', _('This answer must be an email address')), ('t', 'Error')]
for i in range(len(questions)):
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, questions[i].id)
self.assertEqual(self.Question.validate_question(questions[i], {answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for textbox is unable to notify if answer is violating the validation rules")
def test_04_question_numerical_box(self):
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box', 'validation_required': True,
'validation_min_float_value': 2.1, 'validation_max_float_value': 3.0, 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('aaa', _('This is not a number')), ('4.5', 'Error'), ('0.1', 'Error')]
for i in range(len(results)):
self.assertEqual(self.Question.validate_question(question, {answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type numerical_box is unable to notify if answer is violating the validation rules")
def test_05_question_datetime(self):
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'datetime', 'validation_required': True,
'validation_min_date': '2015-03-20 00:00:00', 'validation_max_date': '2015-03-25 00:00:00', 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('2015-55-10', _('This is not a date/time')), ('2015-03-19 00:00:00', 'Error'), ('2015-03-26 00:00:00', 'Error')]
for i in range(len(results)):
self.assertEqual(self.Question.validate_question(question, {answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type datetime is unable to notify if answer is violating the validation rules")
def test_06_survey_sharing(self):
# Case-1: Executing action with correct data.
correct_survey = self.Survey.sudo(self.survey_manager).create({
'title': "S0", 'stage_id': self.Stage.search([('sequence', '=', 1)]).id,
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})
action = correct_survey.action_send_survey()
templates = self.env['ir.model.data'].get_object_reference('survey', 'email_template_survey')
template_id = templates[1] if len(templates) > 0 else False
ctx = dict(self.env.context)
ctx.update({
'default_model': 'survey.survey',
'default_res_id': correct_survey.id,
'default_survey_id': correct_survey.id,
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment'})
self.assertDictEqual(action, {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': ctx,
})
# Case-2: Executing action with incorrect data.
surveys = [
self.Survey.sudo(self.survey_manager).create({ # Survey without any page or question.
'title': "Test survey"}),
self.Survey.sudo(self.survey_manager).create({ # Closed Survey.
'title': "S0", 'stage_id': self.Stage.search([('closed', '=', True)]).id, # Getting Closed stage id.
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})]
for survey in surveys:
self.assertRaises(UserError, survey.action_send_survey)
def test_07_survey_email_message(self):
# Case-1: Executing send_mail with correct data.
partner = self.Partner.create({'name': 'Marie De Cock', 'email': 'marie.de.cock@gmail.com'})
survey_mail_message = self.SurveyMailMessage.sudo(self.survey_manager).create({
'survey_id': self.survey1.id, 'public': 'email_public_link', 'body': '__URL__', 'partner_ids': [(4, partner.id)]})
survey_mail_message.send_mail()
# Case-2: Executing send_mail with incorrect data.
mail_messages = [
self.SurveyMailMessage.sudo(self.survey_manager).create({ # Mail Message without __URL__ in body.
'survey_id': self.survey1.id, 'public': 'email_public_link'}),
            self.SurveyMailMessage.sudo(self.survey_manager).create({ # Mail Message without recipients.
'survey_id': self.survey1.id, 'public': 'email_public_link', 'body': "__URL__"})]
for message in mail_messages:
self.assertRaises(UserError, message.send_mail)
def test_08_survey_urls(self):
def validate_url(url):
""" Reference: https://github.com/django/django/blob/master/django/core/validators.py """
url_regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return True if url_regex.match(url) else False
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
urltypes = {'public': 'start', 'print': 'print', 'result': 'results'}
for urltype, urltxt in urltypes.iteritems():
survey_url = getattr(self.survey1, urltype + '_url')
survey_url_relative = getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url')
self.assertTrue(validate_url(survey_url))
url = "survey/%s/%s" % (urltxt, slug(self.survey1))
full_url = urljoin(base_url, url)
self.assertEqual(full_url, survey_url)
self.assertEqual('/' + url, survey_url_relative)
if urltype == 'public':
url_html = '<a href="%s">Click here to start survey</a>'
self.assertEqual(url_html % full_url, getattr(self.survey1, urltype + '_url_html'), msg="Public URL is incorrect")
self.assertEqual(url_html % ('/' + url), getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url_html'), msg="Public URL is incorrect.")
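        # For illustration: with a base_url of http://localhost:8069 and a
        # survey slugged "s0-1" (an assumed slug), the public URL built above
        # would be http://localhost:8069/survey/start/s0-1 and its relative
        # form /survey/start/s0-1.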
def test_09_answer_survey(self):
question = self.Question.sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
input_portal = self.UserInput.sudo(self.survey_user).create({
'survey_id': self.survey1.id,
'partner_id': self.survey_user.partner_id.id,
'user_input_line_ids': [(0, 0, {
'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
'survey_id': self.survey1.id, 'page_id': self.page1, 'question_id': question.id})]})
input_public = self.UserInput.sudo(self.user_public).create({
'survey_id': self.survey1.id,
'partner_id': self.survey_user.partner_id.id,
'user_input_line_ids': [(0, 0, {
'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
'survey_id': self.survey1.id, 'page_id': self.page1, 'question_id': question.id})]})
answers = [input_portal.user_input_line_ids[0], input_public.user_input_line_ids[0]]
expected_values = {'answer_type': 'free_text', 'value_free_text': "Test Answer"}
for answer in answers:
for field, value in expected_values.iteritems():
self.assertEqual(getattr(answer, field), value, msg="Unable to answer the survey. Expected behaviour of %s is not proper." % (field))
def test_10_survey_result_simple_multiple_choice(self):
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'simple_choice',
'labels_ids': [(0, 0, {'value': "Choice0", 'quizz_mark': 0}), (0, 0, {'value': "Choice1", 'quizz_mark': 0})]})
for i in range(3):
self.UserInput.sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id,
'answer_type': 'suggestion',
'value_suggested': random.choice(question.labels_ids.ids)})]})
lines = [line.value_suggested.id for line in question.user_input_line_ids]
answers = [{'text': label.value, 'count': lines.count(label.id), 'answer_id': label.id} for label in question.labels_ids]
prp_result = self.Survey.prepare_result(question)['answers']
answers.sort()
prp_result.sort()
self.assertEqual(prp_result, answers, msg="Statistics of simple, multiple choice questions are different from expectation")
def test_11_survey_result_matrix(self):
question = self.Question.sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
for i in range(3):
self.UserInput.sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id, 'answer_type': 'suggestion', 'value_suggested': random.choice(question.labels_ids.ids),
'value_suggested_row': random.choice(question.labels_ids_2.ids)})]})
lines = [(line.value_suggested_row.id, line.value_suggested.id) for line in question.user_input_line_ids]
res = {}
for i in product(question.labels_ids_2.ids, question.labels_ids.ids):
res[i] = lines.count((i))
self.assertEqual(self.Survey.prepare_result(question)['result'], res, msg="Statistics of matrix type questions are different from expectations")
def test_12_survey_result_numeric_box(self):
question = self.Question.sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
num = map(float, random.sample(range(1, 100), 3))
nsum = sum(num)
for i in range(3):
self.UserInput.sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id, 'answer_type': 'number', 'value_number': num[i]})]})
exresult = {
'average': round((nsum / len(num)), 2), 'max': round(max(num), 2),
'min': round(min(num), 2), 'sum': nsum, 'most_common': Counter(num).most_common(5)}
result = self.Survey.prepare_result(question)
for key in exresult.keys():
self.assertEqual(result[key], exresult[key], msg="Statistics of numeric box type questions are different from expectations")
def test_13_survey_actions(self):
self.Question.sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
actions = {
'start': {'method': 'public', 'token': '/test', 'text': 'Start'},
'print': {'method': 'print', 'token': '/test', 'text': 'Print'},
'result': {'method': 'result', 'token': '', 'text': 'Results of the'},
'test': {'method': 'public', 'token': '/phantom', 'text': 'Results of the'}}
for action, val in actions.iteritems():
result = getattr(self.survey1.with_context({'survey_token': val['token'][1:]}), 'action_' + action + '_survey')()
url = getattr(self.survey1.with_context({'relative_url': True}), val['method'] + '_url') + val['token']
self.assertEqual(result['url'], url)
|
gpl-3.0
|
zstyblik/infernal-twin
|
build/reportlab/tools/docco/codegrab.py
|
14
|
7818
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/tools/docco/codegrab.py
#codegrab.py
"""
This grabs various Python class, method and function
headers and their doc strings to include in documents
"""
import imp
import types
import string
import os
import sys
class Struct:
pass
def getObjectsDefinedIn(modulename, directory=None):
"""Returns two tuple of (functions, classes) defined
in the given module. 'directory' must be the directory
containing the script; modulename should not include
the .py suffix"""
if directory:
searchpath = [directory]
else:
searchpath = sys.path # searches usual Python path
#might be a package. If so, check the top level
#package is there, then recalculate the path needed
words = modulename.split('.')
if len(words) > 1:
packagename = words[0]
packagefound = imp.find_module(packagename, searchpath)
assert packagefound, "Package %s not found" % packagename
(file, packagepath, description) = packagefound
#now the full path should be known, if it is in the
#package
directory = os.path.join(*([packagepath] + words[1:-1]))
modulename = words[-1]
searchpath = [directory]
#find and import the module.
found = imp.find_module(modulename, searchpath)
assert found, "Module %s not found" % modulename
(file, pathname, description) = found
mod = imp.load_module(modulename, file, pathname, description)
#grab the code too, minus trailing newlines
lines = open(pathname, 'r').readlines()
lines = list(map(str.rstrip, lines))
result = Struct()
result.functions = []
result.classes = []
result.doc = mod.__doc__
for name in dir(mod):
value = getattr(mod, name)
if type(value) is types.FunctionType:
path, file = os.path.split(value.__code__.co_filename)
root, ext = os.path.splitext(file)
#we're possibly interested in it
if root == modulename:
#it was defined here
funcObj = value
fn = Struct()
fn.name = name
fn.proto = getFunctionPrototype(funcObj, lines)
if funcObj.__doc__:
fn.doc = dedent(funcObj.__doc__)
else:
fn.doc = '(no documentation string)'
#is it official?
if name[0:1] == '_':
fn.status = 'private'
elif name[-1] in '0123456789':
fn.status = 'experimental'
else:
fn.status = 'official'
result.functions.append(fn)
elif type(value) == type:
if value.__module__ == modulename:
cl = Struct()
cl.name = name
if value.__doc__:
cl.doc = dedent(value.__doc__)
else:
cl.doc = "(no documentation string)"
cl.bases = []
for base in value.__bases__:
cl.bases.append(base.__name__)
if name[0:1] == '_':
cl.status = 'private'
elif name[-1] in '0123456789':
cl.status = 'experimental'
else:
cl.status = 'official'
cl.methods = []
#loop over dict finding methods defined here
# Q - should we show all methods?
# loop over dict finding methods defined here
items = list(value.__dict__.items())
items.sort()
for (key2, value2) in items:
if type(value2) != types.FunctionType:
continue # not a method
elif os.path.splitext(value2.__code__.co_filename)[0] == modulename:
continue # defined in base class
else:
#we want it
meth = Struct()
meth.name = key2
name2 = value2.__code__.co_name
meth.proto = getFunctionPrototype(value2, lines)
if name2!=key2:
meth.doc = 'pointer to '+name2
meth.proto = meth.proto.replace(name2,key2)
else:
if value2.__doc__:
meth.doc = dedent(value2.__doc__)
else:
meth.doc = "(no documentation string)"
#is it official?
if key2[0:1] == '_':
meth.status = 'private'
elif key2[-1] in '0123456789':
meth.status = 'experimental'
else:
meth.status = 'official'
cl.methods.append(meth)
result.classes.append(cl)
return result
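# Illustrative usage (added note; mirrors the test() helper at the bottom of
# this file, which documents 'reportlab.platypus.paragraph'):
#   info = getObjectsDefinedIn('reportlab.platypus.paragraph')
#   for fn in info.functions:
#       print(fn.proto)
#   for cl in info.classes:
#       print(cl.name, len(cl.methods))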
def getFunctionPrototype(f, lines):
"""Pass in the function object and list of lines;
it extracts the header as a multiline text block."""
firstLineNo = f.__code__.co_firstlineno - 1
lineNo = firstLineNo
brackets = 0
while 1:
line = lines[lineNo]
for char in line:
if char == '(':
brackets = brackets + 1
elif char == ')':
brackets = brackets - 1
if brackets == 0:
break
else:
lineNo = lineNo + 1
usefulLines = lines[firstLineNo:lineNo+1]
return '\n'.join(usefulLines)
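# Added illustration: for a header that spans several lines, e.g.
#   def draw(self,
#            canvas):
# the bracket counter only reaches zero on the closing parenthesis, so both
# lines are returned joined by '\n'.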
def dedent(comment):
"""Attempts to dedent the lines to the edge. Looks at no.
of leading spaces in line 2, and removes up to that number
of blanks from other lines."""
commentLines = comment.split('\n')
if len(commentLines) < 2:
cleaned = list(map(str.lstrip, commentLines))
else:
spc = 0
for char in commentLines[1]:
if char in string.whitespace:
spc = spc + 1
else:
break
#now check other lines
cleaned = []
for line in commentLines:
for i in range(min(len(line),spc)):
if line[0] in string.whitespace:
line = line[1:]
cleaned.append(line)
return '\n'.join(cleaned)
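# Added illustration of the dedent() behaviour above:
#   dedent("First line.\n    Second line.\n        Third line.")
# returns "First line.\nSecond line.\n    Third line.", because line 2 carries
# four leading spaces and up to four leading blanks are stripped from every line.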
def dumpDoc(modulename, directory=None):
"""Test support. Just prints docco on the module
to standard output."""
docco = getObjectsDefinedIn(modulename, directory)
print('codegrab.py - ReportLab Documentation Utility')
print('documenting', modulename + '.py')
print('-------------------------------------------------------')
print()
if docco.functions == []:
print('No functions found')
else:
print('Functions:')
for f in docco.functions:
print(f.proto)
print(' ' + f.doc)
if docco.classes == []:
print('No classes found')
else:
print('Classes:')
for c in docco.classes:
print(c.name)
print(' ' + c.doc)
for m in c.methods:
print(m.proto) # it is already indented in the file!
print(' ' + m.doc)
print()
def test(m='reportlab.platypus.paragraph'):
dumpDoc(m)
if __name__=='__main__':
import sys
print('Path to search:')
for line in sys.path:
print(' ',line)
M = sys.argv[1:]
if M==[]:
M.append('reportlab.platypus.paragraph')
for m in M:
test(m)
|
gpl-3.0
|
shanglt/youtube-dl
|
youtube_dl/extractor/ntvru.py
|
124
|
5022
|
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
xpath_text,
int_or_none,
)
class NTVRuIE(InfoExtractor):
IE_NAME = 'ntv.ru'
_VALID_URL = r'http://(?:www\.)?ntv\.ru/(?P<id>.+)'
_TESTS = [
{
'url': 'http://www.ntv.ru/novosti/863142/',
'md5': 'ba7ea172a91cb83eb734cad18c10e723',
'info_dict': {
'id': '746000',
'ext': 'mp4',
'title': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
'description': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
'thumbnail': 're:^http://.*\.jpg',
'duration': 136,
},
},
{
'url': 'http://www.ntv.ru/video/novosti/750370/',
'md5': 'adecff79691b4d71e25220a191477124',
'info_dict': {
'id': '750370',
'ext': 'mp4',
'title': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
'description': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
'thumbnail': 're:^http://.*\.jpg',
'duration': 172,
},
},
{
'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416',
'md5': '82dbd49b38e3af1d00df16acbeab260c',
'info_dict': {
'id': '747480',
'ext': 'mp4',
'title': '«Сегодня». 21 марта 2014 года. 16:00',
'description': '«Сегодня». 21 марта 2014 года. 16:00',
'thumbnail': 're:^http://.*\.jpg',
'duration': 1496,
},
},
{
'url': 'http://www.ntv.ru/kino/Koma_film',
'md5': 'f825770930937aa7e5aca0dc0d29319a',
'info_dict': {
'id': '1007609',
'ext': 'mp4',
'title': 'Остросюжетный фильм «Кома»',
'description': 'Остросюжетный фильм «Кома»',
'thumbnail': 're:^http://.*\.jpg',
'duration': 5592,
},
},
{
'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/',
'md5': '9320cd0e23f3ea59c330dc744e06ff3b',
'info_dict': {
'id': '751482',
'ext': 'mp4',
'title': '«Дело врачей»: «Деревце жизни»',
'description': '«Дело врачей»: «Деревце жизни»',
'thumbnail': 're:^http://.*\.jpg',
'duration': 2590,
},
},
]
_VIDEO_ID_REGEXES = [
r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)',
r'<video embed=[^>]+><id>(\d+)</id>',
r'<video restriction[^>]+><key>(\d+)</key>',
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._html_search_regex(self._VIDEO_ID_REGEXES, webpage, 'video id')
player = self._download_xml(
'http://www.ntv.ru/vi%s/' % video_id,
video_id, 'Downloading video XML')
title = clean_html(xpath_text(player, './data/title', 'title', fatal=True))
description = clean_html(xpath_text(player, './data/description', 'description'))
video = player.find('./data/video')
video_id = xpath_text(video, './id', 'video id')
thumbnail = xpath_text(video, './splash', 'thumbnail')
duration = int_or_none(xpath_text(video, './totaltime', 'duration'))
view_count = int_or_none(xpath_text(video, './views', 'view count'))
token = self._download_webpage(
'http://stat.ntv.ru/services/access/token',
video_id, 'Downloading access token')
formats = []
for format_id in ['', 'hi', 'webm']:
file_ = video.find('./%sfile' % format_id)
if file_ is None:
continue
size = video.find('./%ssize' % format_id)
formats.append({
'url': 'http://media2.ntv.ru/vod/%s&tok=%s' % (file_.text, token),
'filesize': int_or_none(size.text if size is not None else None),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
|
unlicense
|
jhutar/spacewalk
|
backend/test/test_cache.py
|
17
|
1050
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.common import rhnCache
key = "/var/goo/goo"
data = "0123456789" * 1024 * 1024
rhnCache.set(key, data, compressed=1, raw=1)
assert data == rhnCache.get(key, compressed=1, raw=1)
rhnCache.set(key, "12345", raw=1)
# Should return None, opening uncompressed data as compressed
assert None == rhnCache.get(key, compressed=1, raw=1)
# Should return None, opening raw data as pickled
assert None == rhnCache.get(key, raw=0)
|
gpl-2.0
|
quickresolve/accel.ai
|
flask-aws/lib/python2.7/site-packages/botocore/vendored/requests/packages/chardet/langhungarianmodel.py
|
2763
|
12536
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
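# Added note: the tables below are indexed by byte value; small entries are
# frequency orders (by chardet's convention, a lower order generally means a
# more frequent letter), e.g. ASCII 'e' (0x65) maps to order 1 in both tables,
# while 252/253/254/255 keep the special meanings listed above.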
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
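# Added note: by chardet convention each cell below scores a pair of character
# orders (previous, current); 3 marks a positive/common sequence, 2 likely,
# 1 unlikely and 0 a negative (essentially never seen) sequence.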
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
# flake8: noqa
|
mit
|
davidjb/sqlalchemy
|
test/orm/test_utils.py
|
20
|
23966
|
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.orm import util as orm_util
from sqlalchemy import Column
from sqlalchemy import util
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.orm import aliased, with_polymorphic, synonym
from sqlalchemy.orm import mapper, create_session, Session
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing import eq_, is_
from sqlalchemy.orm.path_registry import PathRegistry, RootRegistry
from sqlalchemy import inspect
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.testing import AssertsCompiledSQL
class AliasedClassTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _fixture(self, cls, properties={}):
table = Table('point', MetaData(),
Column('id', Integer(), primary_key=True),
Column('x', Integer),
Column('y', Integer))
mapper(cls, table, properties=properties)
return table
def test_simple(self):
class Point(object):
pass
table = self._fixture(Point)
alias = aliased(Point)
assert alias.id
assert alias.x
assert alias.y
assert Point.id.__clause_element__().table is table
assert alias.id.__clause_element__().table is not table
def test_not_instantiatable(self):
class Point(object):
pass
table = self._fixture(Point)
alias = aliased(Point)
assert_raises(TypeError, alias)
def test_instancemethod(self):
class Point(object):
def zero(self):
self.x, self.y = 0, 0
table = self._fixture(Point)
alias = aliased(Point)
assert Point.zero
# TODO: I don't quite understand this
# still
if util.py2k:
assert not getattr(alias, 'zero')
else:
assert getattr(alias, 'zero')
def test_classmethod(self):
class Point(object):
@classmethod
def max_x(cls):
return 100
table = self._fixture(Point)
alias = aliased(Point)
assert Point.max_x
assert alias.max_x
assert Point.max_x() == alias.max_x() == 100
def test_simple_property(self):
class Point(object):
@property
def max_x(self):
return 100
table = self._fixture(Point)
alias = aliased(Point)
assert Point.max_x
assert Point.max_x != 100
assert alias.max_x
assert Point.max_x is alias.max_x
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
class Point(object):
center = (0, 0)
@descriptor
def thing(self, arg):
return arg.center
table = self._fixture(Point)
alias = aliased(Point)
assert Point.thing != (0, 0)
assert Point().thing == (0, 0)
assert Point.thing.method() == 'method'
assert alias.thing != (0, 0)
assert alias.thing.method() == 'method'
def _assert_has_table(self, expr, table):
from sqlalchemy import Column # override testlib's override
for child in expr.get_children():
if isinstance(child, Column):
assert child.table is table
def test_hybrid_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_method
def left_of(self, other):
return self.x < other.x
self._fixture(Point)
alias = aliased(Point)
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.left_of(Point)),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x < point.x"
)
def test_hybrid_descriptor_two(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_property
def double_x(self):
return self.x * 2
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.double_x), "point.x * :x_1")
eq_(str(alias.double_x), "point_1.x * :x_1")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.double_x > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x * :x_1 > point.x"
)
def test_hybrid_descriptor_three(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_property
def x_alone(self):
return self.x
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.x_alone), "Point.x")
eq_(str(alias.x_alone), "AliasedClass_Point.x")
assert Point.x_alone is Point.x
eq_(str(alias.x_alone == alias.x), "point_1.x = point_1.x")
a2 = aliased(Point)
eq_(str(a2.x_alone == alias.x), "point_1.x = point_2.x")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.x_alone > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x > point.x"
)
def test_proxy_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
self._fixture(Point, properties={
'x_syn': synonym("x")
})
alias = aliased(Point)
eq_(str(Point.x_syn), "Point.x_syn")
eq_(str(alias.x_syn), "AliasedClass_Point.x_syn")
sess = Session()
self.assert_compile(
sess.query(alias.x_syn).filter(alias.x_syn > Point.x_syn),
"SELECT point_1.x AS point_1_x FROM point AS point_1, point "
"WHERE point_1.x > point.x"
)
def test_parententity_vs_parentmapper(self):
class Point(object):
pass
self._fixture(Point, properties={
'x_syn': synonym("x")
})
pa = aliased(Point)
is_(Point.x_syn._parententity, inspect(Point))
is_(Point.x._parententity, inspect(Point))
is_(Point.x_syn._parentmapper, inspect(Point))
is_(Point.x._parentmapper, inspect(Point))
is_(
Point.x_syn.__clause_element__()._annotations['parententity'],
inspect(Point))
is_(
Point.x.__clause_element__()._annotations['parententity'],
inspect(Point))
is_(
Point.x_syn.__clause_element__()._annotations['parentmapper'],
inspect(Point))
is_(
Point.x.__clause_element__()._annotations['parentmapper'],
inspect(Point))
pa = aliased(Point)
is_(pa.x_syn._parententity, inspect(pa))
is_(pa.x._parententity, inspect(pa))
is_(pa.x_syn._parentmapper, inspect(Point))
is_(pa.x._parentmapper, inspect(Point))
is_(
pa.x_syn.__clause_element__()._annotations['parententity'],
inspect(pa)
)
is_(
pa.x.__clause_element__()._annotations['parententity'],
inspect(pa)
)
is_(
pa.x_syn.__clause_element__()._annotations['parentmapper'],
inspect(Point))
is_(
pa.x.__clause_element__()._annotations['parentmapper'],
inspect(Point))
class IdentityKeyTest(_fixtures.FixtureTest):
run_inserts = None
def test_identity_key_1(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
key = orm_util.identity_key(User, [1])
eq_(key, (User, (1,)))
key = orm_util.identity_key(User, ident=[1])
eq_(key, (User, (1,)))
def test_identity_key_scalar(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
key = orm_util.identity_key(User, 1)
eq_(key, (User, (1,)))
key = orm_util.identity_key(User, ident=1)
eq_(key, (User, (1,)))
def test_identity_key_2(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
s = create_session()
u = User(name='u1')
s.add(u)
s.flush()
key = orm_util.identity_key(instance=u)
eq_(key, (User, (u.id,)))
def test_identity_key_3(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
row = {users.c.id: 1, users.c.name: "Frank"}
key = orm_util.identity_key(User, row=row)
eq_(key, (User, (1,)))
class PathRegistryTest(_fixtures.FixtureTest):
run_setup_mappers = 'once'
run_inserts = None
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_root_registry(self):
umapper = inspect(self.classes.User)
is_(
RootRegistry()[umapper],
umapper._path_registry
)
eq_(
RootRegistry()[umapper],
PathRegistry.coerce((umapper,))
)
def test_expand(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
path = PathRegistry.coerce((umapper,))
eq_(
path[umapper.attrs.addresses][amapper]
[amapper.attrs.email_address],
PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
)
def test_entity_boolean(self):
umapper = inspect(self.classes.User)
path = PathRegistry.coerce((umapper,))
is_(bool(path), True)
def test_key_boolean(self):
umapper = inspect(self.classes.User)
path = PathRegistry.coerce((umapper, umapper.attrs.addresses))
is_(bool(path), True)
def test_aliased_class(self):
User = self.classes.User
ua = aliased(User)
ua_insp = inspect(ua)
path = PathRegistry.coerce((ua_insp, ua_insp.mapper.attrs.addresses))
assert path.parent.is_aliased_class
def test_indexed_entity(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
path = PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
is_(path[0], umapper)
is_(path[2], amapper)
def test_indexed_key(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
path = PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
eq_(path[1], umapper.attrs.addresses)
eq_(path[3], amapper.attrs.email_address)
def test_slice(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
path = PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
eq_(path[1:3], (umapper.attrs.addresses, amapper))
def test_addition(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
eq_(
p1 + p2,
PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
)
def test_length(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
pneg1 = PathRegistry.coerce(())
p0 = PathRegistry.coerce((umapper,))
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
eq_(len(pneg1), 0)
eq_(len(p0), 1)
eq_(len(p1), 2)
eq_(len(p2), 3)
eq_(len(p3), 4)
eq_(pneg1.length, 0)
eq_(p0.length, 1)
eq_(p1.length, 2)
eq_(p2.length, 3)
eq_(p3.length, 4)
def test_eq(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
u_alias = inspect(aliased(self.classes.User))
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p3 = PathRegistry.coerce((umapper, umapper.attrs.name))
p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p6 = PathRegistry.coerce((amapper, amapper.attrs.user, umapper,
umapper.attrs.addresses))
p7 = PathRegistry.coerce((amapper, amapper.attrs.user, umapper,
umapper.attrs.addresses,
amapper, amapper.attrs.email_address))
is_(p1 == p2, True)
is_(p1 == p3, False)
is_(p1 == p4, False)
is_(p1 == p5, False)
is_(p6 == p7, False)
is_(p6 == p7.parent.parent, True)
is_(p1 != p2, False)
is_(p1 != p3, True)
is_(p1 != p4, True)
is_(p1 != p5, True)
def test_contains_mapper(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
assert p1.contains_mapper(umapper)
assert not p1.contains_mapper(amapper)
def test_path(self):
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
eq_(
p1.path, (umapper, umapper.attrs.addresses)
)
eq_(
p2.path, (umapper, umapper.attrs.addresses, amapper)
)
eq_(
p3.path, (amapper, amapper.attrs.email_address)
)
def test_registry_set(self):
reg = {}
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
p1.set(reg, "p1key", "p1value")
p2.set(reg, "p2key", "p2value")
p3.set(reg, "p3key", "p3value")
eq_(
reg,
{
('p1key', p1.path): 'p1value',
('p2key', p2.path): 'p2value',
('p3key', p3.path): 'p3value',
}
)
def test_registry_get(self):
reg = {}
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
reg.update(
{
('p1key', p1.path): 'p1value',
('p2key', p2.path): 'p2value',
('p3key', p3.path): 'p3value',
}
)
eq_(p1.get(reg, "p1key"), "p1value")
eq_(p2.get(reg, "p2key"), "p2value")
eq_(p2.get(reg, "p1key"), None)
eq_(p3.get(reg, "p3key"), "p3value")
eq_(p3.get(reg, "p1key"), None)
def test_registry_contains(self):
reg = {}
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
reg.update(
{
('p1key', p1.path): 'p1value',
('p2key', p2.path): 'p2value',
('p3key', p3.path): 'p3value',
}
)
assert p1.contains(reg, "p1key")
assert not p1.contains(reg, "p2key")
assert p3.contains(reg, "p3key")
assert not p2.contains(reg, "fake")
def test_registry_setdefault(self):
reg = {}
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
reg.update(
{
('p1key', p1.path): 'p1value',
}
)
p1.setdefault(reg, "p1key", "p1newvalue_a")
p1.setdefault(reg, "p1key_new", "p1newvalue_b")
p2.setdefault(reg, "p2key", "p2newvalue")
eq_(
reg,
{
('p1key', p1.path): 'p1value',
('p1key_new', p1.path): 'p1newvalue_b',
('p2key', p2.path): 'p2newvalue',
}
)
def test_serialize(self):
User = self.classes.User
Address = self.classes.Address
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper,
amapper.attrs.email_address))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
eq_(
p1.serialize(),
[(User, "addresses"), (Address, "email_address")]
)
eq_(
p2.serialize(),
[(User, "addresses"), (Address, None)]
)
eq_(
p3.serialize(),
[(User, "addresses")]
)
def test_deseralize(self):
User = self.classes.User
Address = self.classes.Address
umapper = inspect(self.classes.User)
amapper = inspect(self.classes.Address)
p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper,
amapper.attrs.email_address))
p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
eq_(
PathRegistry.deserialize([(User, "addresses"),
(Address, "email_address")]),
p1
)
eq_(
PathRegistry.deserialize([(User, "addresses"), (Address, None)]),
p2
)
eq_(
PathRegistry.deserialize([(User, "addresses")]),
p3
)
from .inheritance import _poly_fixtures
class PathRegistryInhTest(_poly_fixtures._Polymorphic):
run_setup_mappers = 'once'
run_inserts = None
run_deletes = None
def test_plain(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
pmapper = inspect(Person)
emapper = inspect(Engineer)
p1 = PathRegistry.coerce((pmapper, emapper.attrs.machines))
# given a mapper and an attribute on a subclass,
# the path converts what you get to be against that subclass
eq_(
p1.path,
(emapper, emapper.attrs.machines)
)
def test_plain_compound(self):
Company = _poly_fixtures.Company
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
cmapper = inspect(Company)
pmapper = inspect(Person)
emapper = inspect(Engineer)
p1 = PathRegistry.coerce((cmapper, cmapper.attrs.employees,
pmapper, emapper.attrs.machines))
# given a mapper and an attribute on a subclass,
# the path converts what you get to be against that subclass
eq_(
p1.path,
(cmapper, cmapper.attrs.employees, emapper, emapper.attrs.machines)
)
def test_plain_aliased(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
emapper = inspect(Engineer)
p_alias = aliased(Person)
p_alias = inspect(p_alias)
p1 = PathRegistry.coerce((p_alias, emapper.attrs.machines))
# plain AliasedClass - the path keeps that AliasedClass directly
# as is in the path
eq_(
p1.path,
(p_alias, emapper.attrs.machines)
)
def test_plain_aliased_compound(self):
Company = _poly_fixtures.Company
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
cmapper = inspect(Company)
emapper = inspect(Engineer)
c_alias = aliased(Company)
p_alias = aliased(Person)
c_alias = inspect(c_alias)
p_alias = inspect(p_alias)
p1 = PathRegistry.coerce((c_alias, cmapper.attrs.employees,
p_alias, emapper.attrs.machines))
# plain AliasedClass - the path keeps that AliasedClass directly
# as is in the path
eq_(
p1.path,
(c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)
)
def test_with_poly_sub(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
emapper = inspect(Engineer)
p_poly = with_polymorphic(Person, [Engineer])
e_poly = inspect(p_poly.Engineer)
p_poly = inspect(p_poly)
p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))
# polymorphic AliasedClass - the path uses _entity_for_mapper()
# to get the most specific sub-entity
eq_(
p1.path,
(e_poly, emapper.attrs.machines)
)
def test_with_poly_base(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
pmapper = inspect(Person)
emapper = inspect(Engineer)
p_poly = with_polymorphic(Person, [Engineer])
p_poly = inspect(p_poly)
# "name" is actually on Person, not Engineer
p1 = PathRegistry.coerce((p_poly, emapper.attrs.name))
# polymorphic AliasedClass - because "name" is on Person,
# we get Person, not Engineer
eq_(
p1.path,
(p_poly, pmapper.attrs.name)
)
def test_with_poly_use_mapper(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
emapper = inspect(Engineer)
p_poly = with_polymorphic(Person, [Engineer], _use_mapper_path=True)
p_poly = inspect(p_poly)
p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))
# polymorphic AliasedClass with the "use_mapper_path" flag -
# the AliasedClass acts just like the base mapper
eq_(
p1.path,
(emapper, emapper.attrs.machines)
)
|
mit
|
maalmeida1837/deepdive
|
examples/chunking/udf/ext_training.py
|
15
|
1553
|
#! /usr/bin/env python
# extract training data
import fileinput
import json
import itertools
import tags
import sys
tagNames = ['NP', 'VP', 'PP', 'ADJP', 'ADVP', 'SBAR', 'O', 'PRT', 'CONJP', 'INTJ', 'LST', 'B', '']
sentID = 1
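# Added note: each stdin row is expected to carry five tab-separated columns,
#   word_id <TAB> word <TAB> pos <TAB> ori_tag <TAB> idcol
# e.g. a hypothetical line "1\tConfidence\tNN\tB-NP\t\N"; empty or NULL tags
# mark sentence boundaries and advance sentID in the else-branch below.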
# for each word
for row in sys.stdin:
# obj = json.loads(row)
word_id, word, pos, ori_tag, idcol = row.rstrip().split('\t')
tag = ori_tag
# get tag
# TODO json extractor bug...
if ori_tag != '' and ori_tag not in ['NULL', '\N']:
if (tag != 'NULL' and tag != '\N' and tag != 'O'):
tag = tag.split('-')[1]
if tag not in tagNames:
tag = ''
print '\t'.join([str(_) for _ in
sentID, word_id, word, pos, ori_tag, tagNames.index(tag),
'\N' # explicitly output NULL for "id"
])
else:
sentID += 1
print '\t'.join([str(_) for _ in
'\N', word_id, '\N', '\N', '\N', tagNames.index(''),
'\N' # explicitly output NULL for "id"
])
# print json.dumps({
# 'sent_id' : None,
# 'word_id' : obj['word_id'],
# 'word' : None,
# 'pos' : None,
# 'true_tag': None,
# 'tag' : tagNames.index('')
# })
|
apache-2.0
|
jrleja/bsfh
|
demo/demo_params.py
|
3
|
11700
|
import time, sys
import numpy as np
from sedpy.observate import load_filters
from prospect import prospect_args
from prospect.fitting import fit_model
from prospect.io import write_results as writer
# --------------
# RUN_PARAMS
# When running as a script with argparsing, these are ignored. Kept here for backwards compatibility.
# --------------
run_params = {'verbose': True,
'debug': False,
'outfile': 'demo_galphot',
'output_pickles': False,
# Optimization parameters
'do_powell': False,
'ftol': 0.5e-5, 'maxfev': 5000,
'do_levenberg': True,
'nmin': 10,
# emcee fitting parameters
'nwalkers': 128,
'nburn': [16, 32, 64],
'niter': 512,
'interval': 0.25,
'initial_disp': 0.1,
# dynesty Fitter parameters
'nested_bound': 'multi', # bounding method
'nested_sample': 'unif', # sampling method
'nested_nlive_init': 100,
'nested_nlive_batch': 100,
'nested_bootstrap': 0,
'nested_dlogz_init': 0.05,
'nested_weight_kwargs': {"pfrac": 1.0},
'nested_stop_kwargs': {"post_thresh": 0.1},
# Obs data parameters
'objid': 0,
'phottable': 'demo_photometry.dat',
'luminosity_distance': 1e-5, # in Mpc
# Model parameters
'add_neb': False,
'add_duste': False,
# SPS parameters
'zcontinuous': 1,
}
# --------------
# Model Definition
# --------------
def build_model(object_redshift=0.0, fixed_metallicity=None, add_duste=False,
add_neb=False, luminosity_distance=0.0, **extras):
"""Construct a model. This method defines a number of parameter
specification dictionaries and uses them to initialize a
`models.sedmodel.SedModel` object.
:param object_redshift:
If given, set the model redshift to this value.
:param add_duste: (optional, default: False)
Switch to add (fixed) parameters relevant for dust emission.
:param add_neb: (optional, default: False)
Switch to add (fixed) parameters relevant for nebular emission, and
turn nebular emission on.
:param luminosity_distance: (optional)
If present, add a `"lumdist"` parameter to the model, and set its
value (in Mpc) to this. This allows one to decouple redshift from
distance, and fit, e.g., absolute magnitudes (by setting
luminosity_distance to 1e-5 (10pc))
"""
from prospect.models.templates import TemplateLibrary
from prospect.models import priors, sedmodel
# --- Get a basic delay-tau SFH parameter set. ---
# This has 5 free parameters:
# "mass", "logzsol", "dust2", "tage", "tau"
# And two fixed parameters
# "zred"=0.1, "sfh"=4
# See the python-FSPS documentation for details about most of these
# parameters. Also, look at `TemplateLibrary.describe("parametric_sfh")` to
# view the parameters, their initial values, and the priors in detail.
model_params = TemplateLibrary["parametric_sfh"]
# Add lumdist parameter. If this is not added then the distance is
# controlled by the "zred" parameter and a WMAP9 cosmology.
if luminosity_distance > 0:
model_params["lumdist"] = {"N": 1, "isfree": False,
"init": luminosity_distance, "units":"Mpc"}
# Adjust model initial values (only important for optimization or emcee)
model_params["dust2"]["init"] = 0.1
model_params["logzsol"]["init"] = -0.3
model_params["tage"]["init"] = 13.
model_params["mass"]["init"] = 1e8
# If we are going to be using emcee, it is useful to provide an
# initial scale for the cloud of walkers (the default is 0.1)
# For dynesty these can be skipped
model_params["mass"]["init_disp"] = 1e7
model_params["tau"]["init_disp"] = 3.0
model_params["tage"]["init_disp"] = 5.0
model_params["tage"]["disp_floor"] = 2.0
model_params["dust2"]["disp_floor"] = 0.1
# adjust priors
model_params["dust2"]["prior"] = priors.TopHat(mini=0.0, maxi=2.0)
model_params["tau"]["prior"] = priors.LogUniform(mini=1e-1, maxi=10)
model_params["mass"]["prior"] = priors.LogUniform(mini=1e6, maxi=1e10)
# Change the model parameter specifications based on some keyword arguments
if fixed_metallicity is not None:
# make it a fixed parameter
model_params["logzsol"]["isfree"] = False
#And use value supplied by fixed_metallicity keyword
model_params["logzsol"]['init'] = fixed_metallicity
if object_redshift != 0.0:
# make sure zred is fixed
model_params["zred"]['isfree'] = False
# And set the value to the object_redshift keyword
model_params["zred"]['init'] = object_redshift
if add_duste:
# Add dust emission (with fixed dust SED parameters)
model_params.update(TemplateLibrary["dust_emission"])
if add_neb:
# Add nebular emission (with fixed parameters)
model_params.update(TemplateLibrary["nebular"])
# Now instantiate the model using this new dictionary of parameter specifications
model = sedmodel.SedModel(model_params)
return model
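# Minimal usage sketch (added; assumes prospector and python-FSPS are installed
# and uses only the keywords defined above):
#   model = build_model(object_redshift=0.1, add_neb=True)
#   print(model)  # prints free and fixed parameters, as __main__ does below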
# --------------
# Observational Data
# --------------
# Here we are going to put together some filter names
galex = ['galex_FUV', 'galex_NUV']
spitzer = ['spitzer_irac_ch'+n for n in '1234']
bessell = ['bessell_'+n for n in 'UBVRI']
sdss = ['sdss_{0}0'.format(b) for b in 'ugriz']
# The first filter set is Johnson/Cousins, the second is SDSS. We will use a
# flag in the photometry table to tell us which set to use for each object
# (some were not in the SDSS footprint, and therefore have Johnson/Cousins
# photometry)
#
# All these filters are available in sedpy. If you want to use other filters,
# add their transmission profiles to sedpy/sedpy/data/filters/ with appropriate
# names (and format)
filtersets = (galex + bessell + spitzer,
galex + sdss + spitzer)
def build_obs(objid=0, phottable='demo_photometry.dat',
luminosity_distance=None, **kwargs):
"""Load photometry from an ascii file. Assumes the following columns:
`objid`, `filterset`, [`mag0`,....,`magN`] where N >= 11. The User should
modify this function (including adding keyword arguments) to read in their
particular data format and put it in the required dictionary.
:param objid:
The object id for the row of the photometry file to use. Integer.
Requires that there be an `objid` column in the ascii file.
:param phottable:
Name (and path) of the ascii file containing the photometry.
:param luminosity_distance: (optional)
The Johnson 2013 data are given as AB absolute magnitudes. They can be
turned into apparent magnitudes by supplying a luminosity distance.
:returns obs:
Dictionary of observational data.
"""
# Write your code here to read data. Can use FITS, h5py, astropy.table,
# sqlite, whatever.
# e.g.:
# import astropy.io.fits as pyfits
# catalog = pyfits.getdata(phottable)
from prospect.utils.obsutils import fix_obs
# Here we will read in an ascii catalog of magnitudes as a numpy structured
# array
with open(phottable, 'r') as f:
# drop the comment hash
header = f.readline().split()[1:]
catalog = np.genfromtxt(phottable, comments='#',
dtype=np.dtype([(n, np.float) for n in header]))
# Find the right row
ind = catalog['objid'] == float(objid)
# Here we are dynamically choosing which filters to use based on the object
# and a flag in the catalog. Feel free to make this logic more (or less)
# complicated.
filternames = filtersets[int(catalog[ind]['filterset'])]
# And here we loop over the magnitude columns
mags = [catalog[ind]['mag{}'.format(i)] for i in range(len(filternames))]
mags = np.array(mags)
# And since these are absolute mags, we can shift to any distance.
if luminosity_distance is not None:
dm = 25 + 5 * np.log10(luminosity_distance)
mags += dm
# Build output dictionary.
obs = {}
# This is a list of sedpy filter objects. See the
# sedpy.observate.load_filters command for more details on its syntax.
obs['filters'] = load_filters(filternames)
# This is a list of maggies, converted from mags. It should have the same
# order as `filters` above.
obs['maggies'] = np.squeeze(10**(-mags/2.5))
# HACK. You should use real flux uncertainties
obs['maggies_unc'] = obs['maggies'] * 0.07
# Here we mask out any NaNs or infs
obs['phot_mask'] = np.isfinite(np.squeeze(mags))
# We have no spectrum.
obs['wavelength'] = None
obs['spectrum'] = None
# Add unessential bonus info. This will be stored in output
#obs['dmod'] = catalog[ind]['dmod']
obs['objid'] = objid
# This ensures all required keys are present and adds some extra useful info
obs = fix_obs(obs)
return obs
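# Example call (added; 'demo_photometry.dat' is the demo table referenced in
# run_params above):
#   obs = build_obs(objid=0, phottable='demo_photometry.dat')
#   print(len(obs['filters']), obs['maggies'].shape)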
# --------------
# SPS Object
# --------------
def build_sps(zcontinuous=1, compute_vega_mags=False, **extras):
from prospect.sources import CSPSpecBasis
sps = CSPSpecBasis(zcontinuous=zcontinuous,
compute_vega_mags=compute_vega_mags)
return sps
# -----------------
# Noise Model
# ------------------
def build_noise(**extras):
return None, None
# -----------
# Everything
# ------------
def build_all(**kwargs):
return (build_obs(**kwargs), build_model(**kwargs),
build_sps(**kwargs), build_noise(**kwargs))
if __name__ == '__main__':
# - Parser with default arguments -
parser = prospect_args.get_parser()
# - Add custom arguments -
parser.add_argument('--object_redshift', type=float, default=0.0,
help=("Redshift for the model"))
parser.add_argument('--add_neb', action="store_true",
help="If set, add nebular emission in the model (and mock).")
parser.add_argument('--add_duste', action="store_true",
help="If set, add dust emission to the model.")
parser.add_argument('--luminosity_distance', type=float, default=1e-5,
help=("Luminosity distance in Mpc. Defaults to 10pc "
"(for case of absolute mags)"))
parser.add_argument('--phottable', type=str, default="demo_photometry.dat",
help="Name of the table from which to get photometry.")
parser.add_argument('--objid', type=int, default=0,
help="zero-indexed row number in the table to fit.")
args = parser.parse_args()
run_params = vars(args)
obs, model, sps, noise = build_all(**run_params)
run_params["sps_libraries"] = sps.ssp.libraries
run_params["param_file"] = __file__
print(model)
if args.debug:
sys.exit()
#hfile = setup_h5(model=model, obs=obs, **run_params)
hfile = "{0}_{1}_mcmc.h5".format(args.outfile, int(time.time()))
output = fit_model(obs, model, sps, noise, **run_params)
writer.write_hdf5(hfile, run_params, model, obs,
output["sampling"][0], output["optimization"][0],
tsample=output["sampling"][1],
toptimize=output["optimization"][1],
sps=sps)
try:
hfile.close()
except(AttributeError):
pass
|
mit
|
KarrLab/kinetic_datanator
|
datanator/data_source/metabolites_meta_collection.py
|
1
|
14782
|
from datanator_query_python.query import query_sabiork, query_xmdb
from datanator.util import chem_util
from datanator.util import file_util
from datanator.util import index_collection
import datanator.config.core
import pymongo
import re
from pymongo.collation import Collation, CollationStrength
class MetabolitesMeta(query_sabiork.QuerySabio):
''' meta_loc: database location to save the meta collection
'''
def __init__(self, cache_dirname=None, MongoDB=None, replicaSet=None, db=None,
verbose=False, max_entries=float('inf'), username = None,
password = None, authSource = 'admin', meta_loc = None):
self.cache_dirname = cache_dirname
self.verbose = verbose
self.MongoDB = MongoDB
self.replicaSet = replicaSet
self.max_entries = max_entries
self.username = username
self.password = password
self.authSource = authSource
self.meta_loc = meta_loc
super(MetabolitesMeta, self).__init__(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=replicaSet,
db=db, verbose=verbose, max_entries=max_entries, username = username,
password = password, authSource = authSource)
self.frequency = 50
self.chem_manager = chem_util.ChemUtil()
self.file_manager = file_util.FileUtil()
self.ymdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ymdb', readPreference='nearest')
self.ecmdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ecmdb', readPreference='nearest')
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.client, self.db, self.collection = self.con_db('metabolites_meta')
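# Typical usage sketch (added; the connection values below are placeholders,
# not project defaults):
#   meta = MetabolitesMeta(MongoDB='localhost:27017', db='datanator',
#                          username='user', password='pwd',
#                          meta_loc='datanator', verbose=True)
#   meta.load_content()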
def load_content(self):
collection_name = 'metabolites_meta'
ecmdb_fields = ['m2m_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ecmdb_fields, collection_src='ecmdb', collection_des = collection_name)
ymdb_fields = ['ymdb_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ymdb_fields, collection_src='ymdb', collection_des = collection_name)
_, _, collection = self.con_db(collection_name)
k = 0
for doc in self.collection.find(filter={}, projection={'inchi':1}):
if k > self.max_entries:
break
kinlaw_id = self.get_kinlawid_by_inchi([doc['inchi']])
rxn_participants = self.find_reaction_participants(kinlaw_id)
collection.update_one({'inchi': doc['inchi']},
{'$set': {'kinlaw_id': kinlaw_id,
'reaction_participants': rxn_participants}},
upsert=False)
k += 1
# i = 0
# cursor = collection.find(filter = {}, projection = {'similar_compounds_corrected':1, 'similar_compounds': 1})
# for doc in cursor:
# if i % self.frequency == 0:
# print(i)
# replacement = []
# for corrected in doc['similar_compounds_corrected']:
# for k, v in corrected.items():
# dic = {}
# dic[k] = v
# replacement.append(dic)
# collection.update_one({'_id': doc['_id']},
# {'$set': {'similar_compounds': replacement}},
# upsert=False)
# i += 1
def replace_key_in_similar_compounds(self):
query = {}
projection = {'similar_compounds': 1}
_, _, col = self.con_db('metabolites_meta')
docs = col.find(filter=query, projection=projection)
for doc in docs:
result = []
_list = doc['similar_compounds']
for dic in _list:
old_key = list(dic.keys())[0]
try:
new_key = col.find_one(filter={'inchi': old_key},
projection={'InChI_Key':1})['InChI_Key']
result.append( {new_key: dic[old_key]})
except TypeError:
result.append( {'NoStructure': -1} )
col.update_one({'_id': doc['_id']},
{'$set': {'similar_compounds': result} })
def fill_metabolite_fields(self, fields=None, collection_src=None, collection_des = None):
'''Fill in values of fields of interest from
metabolite collection: ecmdb or ymdb
Args:
fields: list of fields of interest
collection_src: collection in which query will be done
collection_des: collection in which result will be updated
'''
projection = {}
for field in fields:
projection[field] = 1
projection['_id'] = 0
_, _, col_src = self.con_db(collection_src)
_, _, col_des = self.con_db(collection_des)
cursor = col_src.find(filter={}, projection=projection)
i = 0
for doc in cursor:
if i == self.max_entries:
break
if i % self.frequency == 0:
                print('Getting fields of interest from document {} in {}'.format(i, collection_src))
doc['InChI_Key'] = self.chem_manager.inchi_to_inchikey(doc['inchi'])
if isinstance(doc.get('synonyms'), list):
continue
try:
synonyms = doc.get('synonyms', None).get('synonym')
except AttributeError:
synonyms = doc.get('synonyms', None)
col_des.update_one({'inchi': doc['inchi']},
{ '$set': { fields[0]: doc[fields[0]],
fields[1]: doc[fields[1]],
'synonyms': synonyms,
'InChI_Key': doc['InChI_Key']}},
upsert=True)
i += 1
def fill_names(self):
"""Fill names of metabolites in 'name' field
"""
docs = self.collection.find({})
count = self.collection.count_documents({})
for i, doc in enumerate(docs):
name = ''
inchi_key = doc['InChI_Key']
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Adding name to document {} out of {}.'.format(i, count))
if doc.get('ymdb_id') is None:
name = self.ecmdb_query.get_name_by_inchikey(inchi_key)
else:
name = self.ymdb_query.get_name_by_inchikey(inchi_key)
self.collection.update_one({'_id': doc['_id']},
{'$set': {'name': name}}, upsert=False)
    def fill_standard_id(self, skip=0):
        """Fill meta collection with chebi_id, hmdb_id, kegg_id,
        and other standard identifiers.
Args:
skip (:obj:`int`): skip first n number of records.
"""
con_0 = {'chebi_id': {'$exists': False}}
con_1 = {'chebi_id': None}
query = {'$or': [con_0, con_1]}
docs = self.collection.find(query, skip=skip)
count = self.collection.count_documents(query)
for i, doc in enumerate(docs):
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Processing doc {} out of {}'.format(i+skip, count))
m2m_id = doc.get('m2m_id')
ymdb_id = doc.get('ymdb_id')
if ymdb_id == 'YMDB00890' or ymdb_id == 'YMDB00862':
continue
if ymdb_id is not None: # ymdb has richer data than ecmdb
doc_e = self.ymdb_query.get_standard_ids_by_id(ymdb_id)
if doc_e['synonyms']:
synonyms = doc_e['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'ymdb_id': ymdb_id},
{'$set': {'chebi_id': doc_e['chebi_id'],
'hmdb_id': doc_e['hmdb_id'],
'kegg_id': doc_e['kegg_id'],
'description': doc_e['description'],
'chemical_formula': doc_e['chemical_formula'],
'average_molecular_weight': doc_e['average_molecular_weight'],
'cas_registry_number': doc_e['cas_registry_number'],
'smiles': doc_e['smiles'],
'cellular_locations': doc_e['cellular_locations'],
'pubchem_compound_id': doc_e['pubchem_compound_id'],
'chemspider_id': doc_e['chemspider_id'],
'biocyc_id': doc_e['biocyc_id'],
'pathways': doc_e['pathways'],
'property': doc_e['property'],
'name': doc_e['name'],
'synonyms': synonyms}}, upsert=False)
elif m2m_id is not None:
doc_y = self.ecmdb_query.get_standard_ids_by_id(m2m_id)
if doc_y['synonyms']:
synonyms = doc_y['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'m2m_id': m2m_id},
{'$set': {'chebi_id': doc_y['chebi_id'],
'hmdb_id': doc_y['hmdb_id'],
'kegg_id': doc_y['kegg_id'],
'description': doc_y['description'],
'chemical_formula': doc_y['chemical_formula'],
'average_molecular_weight': doc_y['average_molecular_weight'],
'cas_registry_number': doc_y['cas_registry_number'],
'smiles': doc_y['smiles'],
'cellular_locations': doc_y['cellular_locations'],
'pubchem_compound_id': doc_y['pubchem_compound_id'],
'chemspider_id': doc_y['chemspider_id'],
'biocyc_id': doc_y['biocyc_id'],
'pathways': doc_y['pathways'],
'property': doc_y['property'],
'name': doc_y['name'],
'synonyms': synonyms}}, upsert=False)
else:
continue
def remove_dups(self, _key):
"""Remove entries with the same _key.
Args:
            _key(:obj:`str`): Name of the field in which dups will be identified.
"""
num, docs = self.get_duplicates('metabolites_meta', _key)
return num, docs
def reset_cellular_locations(self, start=0):
"""Github (https://github.com/KarrLab/datanator_rest_api/issues/69)
"""
query = {'cellular_locations': {'$ne': None}}
count = self.collection.count_documents(query) - start
for i, doc in enumerate(self.collection.find(filter=query, skip=start,
projection={'m2m_id': 1, 'ymdb_id': 1,
'cellular_locations': 1})):
if i == self.max_entries:
break
if self.verbose and i % 100 == 0:
print('Processing doc {} out of {} ...'.format(i, count))
cell_locations = doc['cellular_locations']
obj = []
if doc.get('ymdb_id'):
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['YMDB'],
'cellular_location': location
})
else:
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['ECMDB'],
'cellular_location': location
})
self.collection.update_one({'_id': doc['_id']},
{'$set': {'cellular_locations': obj}},
upsert=False)
def main():
db = 'datanator'
meta_loc = 'datanator'
username = datanator.config.core.get_config()['datanator']['mongodb']['user']
password = datanator.config.core.get_config()['datanator']['mongodb']['password']
MongoDB = datanator.config.core.get_config()['datanator']['mongodb']['server']
manager = MetabolitesMeta(cache_dirname=None, MongoDB=MongoDB, db=db,
verbose=True, max_entries=float('inf'),
username = username, password = password, meta_loc = meta_loc)
# # manager.load_content()
# collection_name = 'metabolites_meta'
# manager.fill_metabolite_fields(fields=['m2m_id', 'inchi', 'synonyms.synonym'],
# collection_src='ecmdb', collection_des = collection_name)
# manager.fill_metabolite_fields(fields=['ymdb_id', 'inchi', 'synonyms.synonym'],
# collection_src='ymdb',
# collection_des = collection_name)
# manager.fill_names()
# manager.fill_standard_id(skip=0)
# num, _ = manager.remove_dups('InChI_Key')
# print(num)
manager.reset_cellular_locations()
if __name__ == '__main__':
main()
|
mit
|
teamfx/openjfx-9-dev-rt
|
modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/__init__.py
|
3
|
1196
|
# Required for Python to search this directory for module files
# Keep this file free of any code or import statements that could
# cause either an error to occur or a log message to be logged.
# This ensures that calling code can import initialization code from
# webkitpy before any errors or log messages due to code in this file.
# Initialization code can include things like version-checking code and
# logging configuration code.
#
# We do not execute any version-checking code or logging configuration
# code in this file so that callers can opt-in as they want. This also
# allows different callers to choose different initialization code,
# as necessary.
import os
import imp
from webkitpy.benchmark_runner.utils import load_subclasses
from http_server_driver_factory import HTTPServerDriverFactory
def http_server_driver_loader(http_server_driver_class):
for platform in http_server_driver_class.platforms:
HTTPServerDriverFactory.add(platform, http_server_driver_class)
load_subclasses(
dirname=os.path.dirname(os.path.abspath(__file__)),
base_class_name='HTTPServerDriver',
base_class_file='http_server_driver.py',
loader=http_server_driver_loader)
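# A brief reading aid (hypothetical subclass name, not part of WebKit): any module
# in this directory that defines a subclass of HTTPServerDriver with a 'platforms'
# attribute is discovered by load_subclasses() above, and the loader registers the
# class once per platform via HTTPServerDriverFactory.add(), e.g.
#
#     class ExampleHTTPServerDriver(HTTPServerDriver):
#         platforms = ['osx', 'linux']
#
# would be added to the factory under both 'osx' and 'linux'.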
|
gpl-2.0
|
cloudera/hue
|
desktop/core/ext-py/dnspython-1.15.0/dns/ttl.py
|
16
|
2283
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TTL conversion."""
import dns.exception
from ._compat import long
class BadTTL(dns.exception.SyntaxError):
"""DNS TTL value is not well-formed."""
def from_text(text):
"""Convert the text form of a TTL to an integer.
The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
@param text: the textual TTL
@type text: string
@raises dns.ttl.BadTTL: the TTL is not well-formed
@rtype: int
"""
if text.isdigit():
total = long(text)
else:
if not text[0].isdigit():
raise BadTTL
total = long(0)
current = long(0)
for c in text:
if c.isdigit():
current *= 10
current += long(c)
else:
c = c.lower()
if c == 'w':
total += current * long(604800)
elif c == 'd':
total += current * long(86400)
elif c == 'h':
total += current * long(3600)
elif c == 'm':
total += current * long(60)
elif c == 's':
total += current
else:
raise BadTTL("unknown unit '%s'" % c)
current = 0
if not current == 0:
raise BadTTL("trailing integer")
if total < long(0) or total > long(2147483647):
raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
return total
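# A small usage sketch (values computed from the unit table above, not taken
# from the dnspython test suite):
#   from_text('300')           ->  300
#   from_text('1w6d4h3m10s')   ->  604800 + 6*86400 + 4*3600 + 3*60 + 10 == 1137790
#   from_text('3x')            ->  raises BadTTL("unknown unit 'x'")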
|
apache-2.0
|
aweisberg/cassandra-dtest
|
streaming_test.py
|
5
|
4250
|
import logging
import operator
import pytest
from cassandra import ConsistencyLevel
from pytest import mark
from dtest import Tester, create_ks, create_cf
from tools.data import insert_c1c2
since = pytest.mark.since
logger = logging.getLogger(__name__)
opmap = {
operator.eq: "==",
operator.gt: ">",
operator.lt: "<",
operator.ne: "!=",
operator.ge: ">=",
operator.le: "<="
}
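# opmap only exists to make the assertion messages below readable: the test
# helper receives comparison callables (operator.gt, operator.eq, ...) and this
# table maps each callable back to its symbol, e.g. opmap[operator.gt] == ">".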
class TestStreaming(Tester):
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs when trying to send the migration to a
# node that hasn't started yet, and when it does, it gets
# replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
# ignore streaming error during bootstrap
r'Exception encountered during startup',
r'Streaming error occurred'
)
def _test_streaming(self, op_zerocopy, op_partial, num_partial, num_zerocopy,
compaction_strategy='LeveledCompactionStrategy', num_keys=1000, rf=3, num_nodes=3):
keys = num_keys
cluster = self.cluster
tokens = cluster.balanced_tokens(num_nodes)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.set_configuration_options(values={'num_tokens': 1})
cluster.populate(num_nodes)
nodes = cluster.nodelist()
for i in range(0, len(nodes)):
nodes[i].set_configuration_options(values={'initial_token': tokens[i]})
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(nodes[0])
create_ks(session, name='ks2', rf=rf)
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'},
compaction_strategy=compaction_strategy)
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
session_n2 = self.patient_exclusive_cql_connection(nodes[1])
session_n2.execute("TRUNCATE system.available_ranges;")
mark = nodes[1].mark_log()
nodes[1].nodetool('rebuild -ks ks2')
nodes[1].watch_log_for('Completed submission of build tasks', filename='debug.log', timeout=120)
zerocopy_streamed_sstable = len(
nodes[1].grep_log('.*CassandraEntireSSTableStreamReader.*?Finished receiving Data.*', filename='debug.log',
from_mark=mark))
partial_streamed_sstable = len(
nodes[1].grep_log('.*CassandraStreamReader.*?Finished receiving file.*', filename='debug.log',
from_mark=mark))
assert op_zerocopy(zerocopy_streamed_sstable, num_zerocopy), "%s %s %s" % (num_zerocopy, opmap.get(op_zerocopy),
zerocopy_streamed_sstable)
        assert op_partial(partial_streamed_sstable, num_partial), "%s %s %s" % (num_partial, opmap.get(op_partial),
                                                                                partial_streamed_sstable)
@since('4.0')
def test_zerocopy_streaming(self):
self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.eq, num_zerocopy=1, num_partial=0,
num_nodes=2, rf=2)
@since('4.0')
def test_zerocopy_streaming_leveled_compaction(self):
self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.gt, num_zerocopy=1, num_partial=1, rf=2)
@mark.xfail(reason="Not implemented yet. Should be functional after CASSANDRA-10540, CASSANDRA-14586 are fixed.")
@since('4.0')
def test_zerocopy_streaming_size_tiered_compaction(self):
self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.gt, num_zerocopy=1, num_partial=1, rf=2,
num_nodes=3, compaction_strategy='SizeTieredCompactionStrategy')
@since('4.0')
def test_zerocopy_streaming_no_replication(self):
self._test_streaming(op_zerocopy=operator.eq, op_partial=operator.eq, num_zerocopy=0, num_partial=0, rf=1,
num_nodes=3)
|
apache-2.0
|
gurneyalex/OpenUpgrade
|
addons/report/models/report.py
|
22
|
23158
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools import config
from openerp.tools.translate import _
from openerp.addons.web.http import request
from openerp.tools.safe_eval import safe_eval as eval
import re
import time
import base64
import logging
import tempfile
import lxml.html
import cStringIO
import subprocess
from distutils.version import LooseVersion
try:
from pyPdf import PdfFileWriter, PdfFileReader
except ImportError:
PdfFileWriter = PdfFileReader = None
_logger = logging.getLogger(__name__)
"""Check the presence of wkhtmltopdf and return its version at OpenERP start-up."""
wkhtmltopdf_state = 'install'
try:
process = subprocess.Popen(
['wkhtmltopdf', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError:
_logger.info('You need wkhtmltopdf to print a pdf version of the reports.')
else:
out, err = process.communicate()
version = re.search('([0-9.]+)', out).group(0)
if LooseVersion(version) < LooseVersion('0.12.0'):
_logger.info('Upgrade wkhtmltopdf to (at least) 0.12.0')
wkhtmltopdf_state = 'upgrade'
else:
wkhtmltopdf_state = 'ok'
if config['workers'] == 1:
_logger.info('You need to start OpenERP with at least two workers to print a pdf version of the reports.')
wkhtmltopdf_state = 'workers'
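# Summary of the possible wkhtmltopdf_state values set by the block above,
# kept here as a reading aid:
#   'install' - the binary could not be executed and must be installed
#   'upgrade' - found, but older than the required 0.12.0
#   'ok'      - found and recent enough
#   'workers' - found, but the server runs with a single worker, which
#               prevents printing pdf reports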
class Report(osv.Model):
_name = "report"
_description = "Report"
public_user = None
MINIMAL_HTML_PAGE = """
<base href="{base_url}">
<!DOCTYPE html>
<html style="height: 0;">
<head>
<link href="/report/static/src/css/reset.min.css" rel="stylesheet"/>
<link href="/web/static/lib/bootstrap/css/bootstrap.css" rel="stylesheet"/>
<link href="/website/static/src/css/website.css" rel="stylesheet"/>
<link href="/web/static/lib/fontawesome/css/font-awesome.css" rel="stylesheet"/>
<style type='text/css'>{css}</style>
{subst}
</head>
<body class="container" onload="subst()">
{body}
</body>
</html>"""
#--------------------------------------------------------------------------
# Extension of ir_ui_view.render with arguments frequently used in reports
#--------------------------------------------------------------------------
    def render(self, cr, uid, ids, template, values=None, context=None):
        """Allow to render a QWeb template python-side. This function returns the 'ir.ui.view'
        render but embellishes it with some variables/methods used in reports.
        :param values: additional methods/variables used in the rendering
:returns: html representation of the template
"""
if values is None:
values = {}
if context is None:
context = {}
view_obj = self.pool['ir.ui.view']
def translate_doc(doc_id, model, lang_field, template):
"""Helper used when a report should be translated into a specific lang.
<t t-foreach="doc_ids" t-as="doc_id">
<t t-raw="translate_doc(doc_id, doc_model, 'partner_id.lang', account.report_invoice_document')"/>
</t>
:param doc_id: id of the record to translate
:param model: model of the record to translate
:param lang_field': field of the record containing the lang
:param template: name of the template to translate into the lang_field
"""
ctx = context.copy()
doc = self.pool[model].browse(cr, uid, doc_id, context=ctx)
qcontext = values.copy()
# Do not force-translate if we chose to display the report in a specific lang
if ctx.get('translatable') is True:
qcontext['o'] = doc
else:
# Reach the lang we want to translate the doc into
ctx['lang'] = eval('doc.%s' % lang_field, {'doc': doc})
qcontext['o'] = self.pool[model].browse(cr, uid, doc_id, context=ctx)
return view_obj.render(cr, uid, template, qcontext, context=ctx)
user = self.pool['res.users'].browse(cr, uid, uid)
website = None
if request and hasattr(request, 'website'):
website = request.website
values.update({
'time': time,
'translate_doc': translate_doc,
            'editable': True, # Will activate inherit_branding
'user': user,
'res_company': user.company_id,
'website': website,
})
return view_obj.render(cr, uid, template, values, context=context)
#--------------------------------------------------------------------------
# Main report methods
#--------------------------------------------------------------------------
    def get_html(self, cr, uid, ids, report_name, data=None, context=None):
        """This method generates and returns the html version of a report.
"""
# If the report is using a custom model to render its html, we must use it.
# Otherwise, fallback on the generic html rendering.
try:
report_model_name = 'report.%s' % report_name
particularreport_obj = self.pool[report_model_name]
return particularreport_obj.render_html(cr, uid, ids, data=data, context=context)
except KeyError:
report = self._get_report_from_name(cr, uid, report_name)
report_obj = self.pool[report.model]
docs = report_obj.browse(cr, uid, ids, context=context)
docargs = {
'doc_ids': ids,
'doc_model': report.model,
'docs': docs,
}
return self.render(cr, uid, [], report.report_name, docargs, context=context)
    def get_pdf(self, cr, uid, ids, report_name, html=None, data=None, context=None):
        """This method generates and returns the pdf version of a report.
"""
if context is None:
context = {}
if html is None:
html = self.get_html(cr, uid, ids, report_name, data=data, context=context)
html = html.decode('utf-8') # Ensure the current document is utf-8 encoded.
# Get the ir.actions.report.xml record we are working on.
report = self._get_report_from_name(cr, uid, report_name)
# Check if we have to save the report or if we have to get one from the db.
save_in_attachment = self._check_attachment_use(cr, uid, ids, report)
# Get the paperformat associated to the report, otherwise fallback on the company one.
if not report.paperformat_id:
user = self.pool['res.users'].browse(cr, uid, uid)
paperformat = user.company_id.paperformat_id
else:
paperformat = report.paperformat_id
# Preparing the minimal html pages
subst = "<script src='/report/static/src/js/subst.js'></script> "
css = '' # Will contain local css
headerhtml = []
contenthtml = []
footerhtml = []
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        # The received html report must be simplified. We convert it into an xml tree
        # in order to extract headers, bodies and footers.
try:
root = lxml.html.fromstring(html)
for node in root.xpath("//html/head/style"):
css += node.text
for node in root.xpath("//div[@class='header']"):
body = lxml.html.tostring(node)
header = self.MINIMAL_HTML_PAGE.format(css=css, subst=subst, body=body, base_url=base_url)
headerhtml.append(header)
for node in root.xpath("//div[@class='footer']"):
body = lxml.html.tostring(node)
footer = self.MINIMAL_HTML_PAGE.format(css=css, subst=subst, body=body, base_url=base_url)
footerhtml.append(footer)
for node in root.xpath("//div[@class='page']"):
                # Previously, we marked some reports to be saved in attachment via their ids, so we
                # must set a relation between report ids and report contents. We use the QWeb
                # branding in order to do so: we search for a node having a data-oe-model
                # attribute with the value of the current report model and read its oe-id attribute
oemodelnode = node.find(".//*[@data-oe-model='%s']" % report.model)
if oemodelnode is not None:
reportid = oemodelnode.get('data-oe-id')
if reportid:
reportid = int(reportid)
else:
reportid = False
body = lxml.html.tostring(node)
reportcontent = self.MINIMAL_HTML_PAGE.format(css=css, subst='', body=body, base_url=base_url)
                # FIXME: imo the best way to extract record id from html reports is by using the
                # qweb branding. As the website editor is not yet split into a module independent from
                # website, when we print a unique report we can use the id passed in argument to
                # identify it.
if ids and len(ids) == 1:
reportid = ids[0]
contenthtml.append(tuple([reportid, reportcontent]))
except lxml.etree.XMLSyntaxError:
contenthtml = []
contenthtml.append(html)
save_in_attachment = {} # Don't save this potentially malformed document
# Get paperformat arguments set in the root html tag. They are prioritized over
# paperformat-record arguments.
specific_paperformat_args = {}
for attribute in root.items():
if attribute[0].startswith('data-report-'):
specific_paperformat_args[attribute[0]] = attribute[1]
# Run wkhtmltopdf process
pdf = self._generate_wkhtml_pdf(
cr, uid, headerhtml, footerhtml, contenthtml, context.get('landscape'),
paperformat, specific_paperformat_args, save_in_attachment
)
return pdf
def get_action(self, cr, uid, ids, report_name, data=None, context=None):
"""Return an action of type ir.actions.report.xml.
:param ids: Ids of the records to print (if not used, pass an empty list)
:param report_name: Name of the template to generate an action for
"""
if ids:
if not isinstance(ids, list):
ids = [ids]
context['active_ids'] = ids
report_obj = self.pool['ir.actions.report.xml']
idreport = report_obj.search(cr, uid, [('report_name', '=', report_name)], context=context)
try:
report = report_obj.browse(cr, uid, idreport[0], context=context)
except IndexError:
raise osv.except_osv(
_('Bad Report Reference'),
_('This report is not loaded into the database: %s.' % report_name)
)
return {
'context': context,
'data': data,
'type': 'ir.actions.report.xml',
'report_name': report.report_name,
'report_type': report.report_type,
'report_file': report.report_file,
'context': context,
}
#--------------------------------------------------------------------------
# Report generation helpers
#--------------------------------------------------------------------------
    def _check_attachment_use(self, cr, uid, ids, report):
        """ Check the attachment_use field. If set to true and an existing pdf is already saved, load
        this one now. Else, mark it to be saved.
"""
save_in_attachment = {}
if report.attachment_use is True:
save_in_attachment['model'] = report.model
save_in_attachment['loaded_documents'] = {}
for record_id in ids:
obj = self.pool[report.model].browse(cr, uid, record_id)
filename = eval(report.attachment, {'object': obj, 'time': time})
if filename is False: # May be false if, for instance, the record is in draft state
continue
else:
alreadyindb = [('datas_fname', '=', filename),
('res_model', '=', report.model),
('res_id', '=', record_id)]
attach_ids = self.pool['ir.attachment'].search(cr, uid, alreadyindb)
if attach_ids:
# Add the loaded pdf in the loaded_documents list
pdf = self.pool['ir.attachment'].browse(cr, uid, attach_ids[0]).datas
pdf = base64.decodestring(pdf)
save_in_attachment['loaded_documents'][record_id] = pdf
_logger.info('The PDF document %s was loaded from the database' % filename)
else:
# Mark current document to be saved
save_in_attachment[record_id] = filename
return save_in_attachment
def _check_wkhtmltopdf(self):
return wkhtmltopdf_state
def _generate_wkhtml_pdf(self, cr, uid, headers, footers, bodies, landscape, paperformat, spec_paperformat_args=None, save_in_attachment=None):
"""Execute wkhtmltopdf as a subprocess in order to convert html given in input into a pdf
document.
        :param headers: list of strings containing the headers
        :param footers: list of strings containing the footers
        :param bodies: list of strings containing the reports
        :param landscape: boolean to force the pdf to be rendered under a landscape format
        :param paperformat: ir.actions.report.paperformat to generate the wkhtmltopdf arguments
        :param spec_paperformat_args: dict of prioritized paperformat arguments
        :param save_in_attachment: dict of reports to save/load in/from the db
:returns: Content of the pdf as a string
"""
command = ['wkhtmltopdf']
command_args = []
tmp_dir = tempfile.gettempdir()
# Passing the cookie to wkhtmltopdf in order to resolve internal links.
try:
if request:
command_args.extend(['--cookie', 'session_id', request.session.sid])
except AttributeError:
pass
# Wkhtmltopdf arguments
command_args.extend(['--quiet']) # Less verbose error messages
if paperformat:
# Convert the paperformat record into arguments
command_args.extend(self._build_wkhtmltopdf_args(paperformat, spec_paperformat_args))
# Force the landscape orientation if necessary
if landscape and '--orientation' in command_args:
command_args_copy = list(command_args)
for index, elem in enumerate(command_args_copy):
if elem == '--orientation':
del command_args[index]
del command_args[index]
command_args.extend(['--orientation', 'landscape'])
elif landscape and not '--orientation' in command_args:
command_args.extend(['--orientation', 'landscape'])
# Execute WKhtmltopdf
pdfdocuments = []
for index, reporthtml in enumerate(bodies):
local_command_args = []
pdfreport = tempfile.NamedTemporaryFile(suffix='.pdf', prefix='report.tmp.', mode='w+b')
# Directly load the document if we already have it
if save_in_attachment and save_in_attachment['loaded_documents'].get(reporthtml[0]):
pdfreport.write(save_in_attachment['loaded_documents'].get(reporthtml[0]))
pdfreport.seek(0)
pdfdocuments.append(pdfreport)
continue
# Wkhtmltopdf handles header/footer as separate pages. Create them if necessary.
if headers:
head_file = tempfile.NamedTemporaryFile(suffix='.html', prefix='report.header.tmp.', dir=tmp_dir, mode='w+')
head_file.write(headers[index])
head_file.seek(0)
local_command_args.extend(['--header-html', head_file.name])
if footers:
foot_file = tempfile.NamedTemporaryFile(suffix='.html', prefix='report.footer.tmp.', dir=tmp_dir, mode='w+')
foot_file.write(footers[index])
foot_file.seek(0)
local_command_args.extend(['--footer-html', foot_file.name])
            # Write the report body to a temporary html file
content_file = tempfile.NamedTemporaryFile(suffix='.html', prefix='report.body.tmp.', dir=tmp_dir, mode='w+')
content_file.write(reporthtml[1])
content_file.seek(0)
try:
wkhtmltopdf = command + command_args + local_command_args
wkhtmltopdf += [content_file.name] + [pdfreport.name]
process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if process.returncode not in [0, 1]:
raise osv.except_osv(_('Report (PDF)'),
_('Wkhtmltopdf failed (error code: %s). '
'Message: %s') % (str(process.returncode), err))
# Save the pdf in attachment if marked
if reporthtml[0] is not False and save_in_attachment.get(reporthtml[0]):
attachment = {
'name': save_in_attachment.get(reporthtml[0]),
'datas': base64.encodestring(pdfreport.read()),
'datas_fname': save_in_attachment.get(reporthtml[0]),
'res_model': save_in_attachment.get('model'),
'res_id': reporthtml[0],
}
self.pool['ir.attachment'].create(cr, uid, attachment)
_logger.info('The PDF document %s is now saved in the '
'database' % attachment['name'])
pdfreport.seek(0)
pdfdocuments.append(pdfreport)
if headers:
head_file.close()
if footers:
foot_file.close()
except:
raise
# Return the entire document
if len(pdfdocuments) == 1:
content = pdfdocuments[0].read()
pdfdocuments[0].close()
else:
content = self._merge_pdf(pdfdocuments)
return content
def _get_report_from_name(self, cr, uid, report_name):
"""Get the first record of ir.actions.report.xml having the ``report_name`` as value for
the field report_name.
"""
report_obj = self.pool['ir.actions.report.xml']
qwebtypes = ['qweb-pdf', 'qweb-html']
conditions = [('report_type', 'in', qwebtypes), ('report_name', '=', report_name)]
idreport = report_obj.search(cr, uid, conditions)[0]
return report_obj.browse(cr, uid, idreport)
def _build_wkhtmltopdf_args(self, paperformat, specific_paperformat_args=None):
"""Build arguments understandable by wkhtmltopdf from a report.paperformat record.
:paperformat: report.paperformat record
:specific_paperformat_args: a dict containing prioritized wkhtmltopdf arguments
:returns: list of string representing the wkhtmltopdf arguments
"""
command_args = []
if paperformat.format and paperformat.format != 'custom':
command_args.extend(['--page-size', paperformat.format])
if paperformat.page_height and paperformat.page_width and paperformat.format == 'custom':
command_args.extend(['--page-width', str(paperformat.page_width) + 'mm'])
command_args.extend(['--page-height', str(paperformat.page_height) + 'mm'])
if specific_paperformat_args and specific_paperformat_args.get('data-report-margin-top'):
command_args.extend(['--margin-top', str(specific_paperformat_args['data-report-margin-top'])])
elif paperformat.margin_top:
command_args.extend(['--margin-top', str(paperformat.margin_top)])
if specific_paperformat_args and specific_paperformat_args.get('data-report-dpi'):
command_args.extend(['--dpi', str(specific_paperformat_args['data-report-dpi'])])
elif paperformat.dpi:
command_args.extend(['--dpi', str(paperformat.dpi)])
if specific_paperformat_args and specific_paperformat_args.get('data-report-header-spacing'):
command_args.extend(['--header-spacing', str(specific_paperformat_args['data-report-header-spacing'])])
elif paperformat.header_spacing:
command_args.extend(['--header-spacing', str(paperformat.header_spacing)])
if paperformat.margin_left:
command_args.extend(['--margin-left', str(paperformat.margin_left)])
if paperformat.margin_bottom:
command_args.extend(['--margin-bottom', str(paperformat.margin_bottom)])
if paperformat.margin_right:
command_args.extend(['--margin-right', str(paperformat.margin_right)])
if paperformat.orientation:
command_args.extend(['--orientation', str(paperformat.orientation)])
if paperformat.header_line:
command_args.extend(['--header-line'])
return command_args
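    # A hedged illustration (hypothetical paperformat values, not a real record):
    # a paperformat with format='A4', margin_top=40, dpi=90, margin_left=7,
    # margin_bottom=20, margin_right=7 and orientation='Portrait' would be turned
    # by _build_wkhtmltopdf_args() into roughly
    #   ['--page-size', 'A4', '--margin-top', '40', '--dpi', '90',
    #    '--margin-left', '7', '--margin-bottom', '20', '--margin-right', '7',
    #    '--orientation', 'Portrait']
    # with any matching data-report-* attribute found on the html root tag taking
    # precedence over the record values.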
def _merge_pdf(self, documents):
"""Merge PDF files into one.
:param documents: list of pdf files
:returns: string containing the merged pdf
"""
writer = PdfFileWriter()
for document in documents:
reader = PdfFileReader(file(document.name, "rb"))
for page in range(0, reader.getNumPages()):
writer.addPage(reader.getPage(page))
document.close()
merged = cStringIO.StringIO()
writer.write(merged)
merged.seek(0)
content = merged.read()
merged.close()
return content
|
agpl-3.0
|
40223249-1/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/genericpath.py
|
727
|
3093
|
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except os.error:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
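# Note that commonprefix() compares character by character, not path component
# by path component, so the result is not necessarily an existing directory:
#   commonprefix(['/usr/lib', '/usr/local/lib'])  ->  '/usr/l'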
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
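# A small behaviour sketch for _splitext, using the posixpath parameters
# (sep='/', altsep=None, extsep='.'):
#   _splitext('archive.tar.gz', '/', None, '.')  ->  ('archive.tar', '.gz')
#   _splitext('.bashrc', '/', None, '.')         ->  ('.bashrc', '')
#   _splitext('/tmp/file', '/', None, '.')       ->  ('/tmp/file', '')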
|
agpl-3.0
|
twitchyliquid64/misc-scripts
|
s3tool/boto/dynamodb2/items.py
|
32
|
14656
|
from copy import deepcopy
class NEWVALUE(object):
# A marker for new data added.
pass
class Item(object):
"""
An object representing the item data within a DynamoDB table.
An item is largely schema-free, meaning it can contain any data. The only
limitation is that it must have data for the fields in the ``Table``'s
schema.
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
Empty items, or items that have no data, are considered falsey.
"""
def __init__(self, table, data=None, loaded=False):
"""
Constructs an (unsaved) ``Item`` instance.
To persist the data in DynamoDB, you'll need to call the ``Item.save``
(or ``Item.partial_save``) on the instance.
Requires a ``table`` parameter, which should be a ``Table`` instance.
        This is required, as DynamoDB's API is focused around all operations
        being table-level. It's also used for persisting the schema across many objects.
Optionally accepts a ``data`` parameter, which should be a dictionary
of the fields & values of the item. Alternatively, an ``Item`` instance
may be provided from which to extract the data.
Optionally accepts a ``loaded`` parameter, which should be a boolean.
``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
it's new data from the user. Default is ``False``.
Example::
>>> users = Table('users')
>>> user = Item(users, data={
... 'username': 'johndoe',
... 'first_name': 'John',
        ...     'date_joined': 1248061592,
... })
# Change existing data.
>>> user['first_name'] = 'Johann'
# Add more data.
>>> user['last_name'] = 'Doe'
# Delete data.
>>> del user['date_joined']
# Iterate over all the data.
>>> for field, val in user.items():
... print "%s: %s" % (field, val)
username: johndoe
first_name: John
        date_joined: 1248061592
"""
self.table = table
self._loaded = loaded
self._orig_data = {}
self._data = data
self._dynamizer = table._dynamizer
if isinstance(self._data, Item):
self._data = self._data._data
if self._data is None:
self._data = {}
if self._loaded:
self._orig_data = deepcopy(self._data)
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
if not key in self._data:
return
del self._data[key]
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def get(self, key, default=None):
return self._data.get(key, default)
def __iter__(self):
for key in self._data:
yield self._data[key]
def __contains__(self, key):
return key in self._data
def __bool__(self):
return bool(self._data)
__nonzero__ = __bool__
def _determine_alterations(self):
"""
        Checks the ``_orig_data`` against the ``_data`` to determine what
changes to the data are present.
Returns a dictionary containing the keys ``adds``, ``changes`` &
``deletes``, containing the updated data.
"""
alterations = {
'adds': {},
'changes': {},
'deletes': [],
}
orig_keys = set(self._orig_data.keys())
data_keys = set(self._data.keys())
# Run through keys we know are in both for changes.
for key in orig_keys.intersection(data_keys):
if self._data[key] != self._orig_data[key]:
if self._is_storable(self._data[key]):
alterations['changes'][key] = self._data[key]
else:
alterations['deletes'].append(key)
# Run through additions.
for key in data_keys.difference(orig_keys):
if self._is_storable(self._data[key]):
alterations['adds'][key] = self._data[key]
# Run through deletions.
for key in orig_keys.difference(data_keys):
alterations['deletes'].append(key)
return alterations
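    # Illustrative only (hypothetical field values): with
    #   _orig_data = {'username': 'johndoe', 'age': 30, 'nick': 'jd'}
    #   _data      = {'username': 'johndoe', 'age': 31, 'note': ''}
    # this method reports
    #   {'adds': {}, 'changes': {'age': 31}, 'deletes': ['nick']}
    # 'note' is excluded from 'adds' because an empty string is not storable
    # (see _is_storable below), and 'nick' lands in 'deletes' because it only
    # exists in the original data.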
def needs_save(self, data=None):
"""
Returns whether or not the data has changed on the ``Item``.
Optionally accepts a ``data`` argument, which accepts the output from
``self._determine_alterations()`` if you've already called it. Typically
unnecessary to do. Default is ``None``.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
"""
if data is None:
data = self._determine_alterations()
needs_save = False
for kind in ['adds', 'changes', 'deletes']:
if len(data[kind]):
needs_save = True
break
return needs_save
def mark_clean(self):
"""
Marks an ``Item`` instance as no longer needing to be saved.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
>>> user.mark_clean()
>>> user.needs_save()
False
"""
self._orig_data = deepcopy(self._data)
def mark_dirty(self):
"""
DEPRECATED: Marks an ``Item`` instance as needing to be saved.
This method is no longer necessary, as the state tracking on ``Item``
has been improved to automatically detect proper state.
"""
return
def load(self, data):
"""
This is only useful when being handed raw data from DynamoDB directly.
If you have a Python datastructure already, use the ``__init__`` or
manually set the data instead.
Largely internal, unless you know what you're doing or are trying to
mix the low-level & high-level APIs.
"""
self._data = {}
for field_name, field_value in data.get('Item', {}).items():
self[field_name] = self._dynamizer.decode(field_value)
self._loaded = True
self._orig_data = deepcopy(self._data)
def get_keys(self):
"""
Returns a Python-style dict of the keys/values.
Largely internal.
"""
key_fields = self.table.get_key_fields()
key_data = {}
for key in key_fields:
key_data[key] = self[key]
return key_data
def get_raw_keys(self):
"""
Returns a DynamoDB-style dict of the keys/values.
Largely internal.
"""
raw_key_data = {}
for key, value in self.get_keys().items():
raw_key_data[key] = self._dynamizer.encode(value)
return raw_key_data
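    # For example (hypothetical hash-key table): if get_keys() returns
    # {'username': 'johndoe'}, get_raw_keys() runs it through the dynamizer and
    # yields the DynamoDB wire format, i.e. {'username': {'S': 'johndoe'}}.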
def build_expects(self, fields=None):
"""
        Builds up a list of expectations to hand off to DynamoDB on save.
Largely internal.
"""
expects = {}
if fields is None:
fields = list(self._data.keys()) + list(self._orig_data.keys())
# Only uniques.
fields = set(fields)
for key in fields:
expects[key] = {
'Exists': True,
}
value = None
# Check for invalid keys.
if not key in self._orig_data and not key in self._data:
raise ValueError("Unknown key %s provided." % key)
# States:
# * New field (only in _data)
# * Unchanged field (in both _data & _orig_data, same data)
# * Modified field (in both _data & _orig_data, different data)
# * Deleted field (only in _orig_data)
orig_value = self._orig_data.get(key, NEWVALUE)
current_value = self._data.get(key, NEWVALUE)
if orig_value == current_value:
# Existing field unchanged.
value = current_value
else:
if key in self._data:
if not key in self._orig_data:
# New field.
expects[key]['Exists'] = False
else:
# Existing field modified.
value = orig_value
else:
# Existing field deleted.
value = orig_value
if value is not None:
expects[key]['Value'] = self._dynamizer.encode(value)
return expects
def _is_storable(self, value):
        # We need to prevent ``None``, empty string & empty set from
        # heading to DDB, but allow false-y values like 0 & False to make it through.
if not value:
if not value in (0, 0.0, False):
return False
return True
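    # Quick reference for _is_storable, following directly from the check above:
    #   0, 0.0, False        -> storable (falsey, but explicitly allowed)
    #   None, '', set(), []  -> not storable (falsey and not in the allow-list)
    #   'x', 1, {'a'}        -> storable (truthy)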
def prepare_full(self):
"""
Runs through all fields & encodes them to be handed off to DynamoDB
as part of an ``save`` (``put_item``) call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
for key, value in self._data.items():
if not self._is_storable(value):
continue
final_data[key] = self._dynamizer.encode(value)
return final_data
def prepare_partial(self):
"""
Runs through **ONLY** the changed/deleted fields & encodes them to be
handed off to DynamoDB as part of an ``partial_save`` (``update_item``)
call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
fields = set()
alterations = self._determine_alterations()
for key, value in alterations['adds'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key, value in alterations['changes'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key in alterations['deletes']:
final_data[key] = {
'Action': 'DELETE',
}
fields.add(key)
return final_data, fields
def partial_save(self):
"""
Saves only the changed data to DynamoDB.
Extremely useful for high-volume/high-write data sets, this allows
you to update only a handful of fields rather than having to push
entire items. This prevents many accidental overwrite situations as
well as saves on the amount of data to transfer over the wire.
Returns ``True`` on success, ``False`` if no save was performed or
the write failed.
Example::
>>> user['last_name'] = 'Doh!'
# Only the last name field will be sent to DynamoDB.
>>> user.partial_save()
"""
key = self.get_keys()
# Build a new dict of only the data we're changing.
final_data, fields = self.prepare_partial()
if not final_data:
return False
# Remove the key(s) from the ``final_data`` if present.
# They should only be present if this is a new item, in which
# case we shouldn't be sending as part of the data to update.
for fieldname, value in key.items():
if fieldname in final_data:
del final_data[fieldname]
try:
# It's likely also in ``fields``, so remove it there too.
fields.remove(fieldname)
except KeyError:
pass
# Build expectations of only the fields we're planning to update.
expects = self.build_expects(fields=fields)
returned = self.table._update_item(key, final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def save(self, overwrite=False):
"""
Saves all data to DynamoDB.
By default, this attempts to ensure that none of the underlying
data has changed. If any fields have changed in between when the
``Item`` was constructed & when it is saved, this call will fail so
as not to cause any data loss.
If you're sure possibly overwriting data is acceptable, you can pass
an ``overwrite=True``. If that's not acceptable, you may be able to use
``Item.partial_save`` to only write the changed field data.
Optionally accepts an ``overwrite`` parameter, which should be a
boolean. If you provide ``True``, the item will be forcibly overwritten
within DynamoDB, even if another process changed the data in the
meantime. (Default: ``False``)
Returns ``True`` on success, ``False`` if no save was performed.
Example::
>>> user['last_name'] = 'Doh!'
# All data on the Item is sent to DynamoDB.
>>> user.save()
# If it fails, you can overwrite.
>>> user.save(overwrite=True)
"""
if not self.needs_save() and not overwrite:
return False
final_data = self.prepare_full()
expects = None
if overwrite is False:
# Build expectations about *all* of the data.
expects = self.build_expects()
returned = self.table._put_item(final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def delete(self):
"""
        Deletes the item's data from DynamoDB.
Returns ``True`` on success.
Example::
# Buh-bye now.
>>> user.delete()
"""
key_data = self.get_keys()
return self.table.delete_item(**key_data)
|
mit
|
ncos/mipt-airdrone
|
src/action_client/main.py
|
3
|
11363
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('action_client')
import rospy, smach, smach_ros
import time
from action_client.msg import *
from actionlib import *
from actionlib.msg import *
movement_speed = 0.0
# Ask user whether we want to launch the quadrotor or not
class PauseState(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['continue', 'abort'])
def execute(self, userdata):
rospy.loginfo("PauseState: press 'y' to continue or 'n' to abort...")
char = raw_input()
while char.lower() not in ("y", "n"):
rospy.loginfo("Please, choose 'y' or 'n'")
char = raw_input()
if char == 'y':
return 'continue'
return 'abort'
class PauseStateDebug(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['continue', 'abort'])
def execute(self, userdata):
rospy.loginfo("PauseStateDebug: press 'y' to continue or 'n' to abort...")
char = raw_input()
while char.lower() not in ("y", "n"):
rospy.loginfo("Please, choose 'y' or 'n'")
char = raw_input()
if char == 'y':
return 'continue'
return 'abort'
def move_along_result_cb(userdata, status, result):
if status == GoalStatus.PREEMPTED:
rospy.loginfo("move_along_result_cb -> status == GoalStatus.PREEMPTED")
return 'aborted'
if status == GoalStatus.ABORTED:
        rospy.loginfo("move_along_result_cb -> status == GoalStatus.ABORTED")
return 'aborted'
if status == GoalStatus.SUCCEEDED:
if result.error == True:
rospy.loginfo("move_along_result_cb -> result.error == True")
return 'aborted'
if result.found == True:
rospy.loginfo("move_along_result_cb -> result.found == True")
return 'wall_found'
if result.land_pad == True:
rospy.loginfo("move_along_result_cb -> result.land_pad == True")
return 'land_pad'
return 'succeeded'
rospy.loginfo("(move_along_result_cb): This line should never be reached")
return 'aborted'
def approach_door_result_cb(userdata, status, result):
if status == GoalStatus.PREEMPTED:
rospy.loginfo("approach_door_result_cb -> status == GoalStatus.PREEMPTED")
return 'aborted'
if status == GoalStatus.ABORTED:
rospy.loginfo("approach_door_result_cb -> status == GoalStatus.ABORTED")
return 'aborted'
if status == GoalStatus.SUCCEEDED:
if result.success == True:
            rospy.loginfo("approach_door_result_cb -> result.success == True")
return 'succeeded'
if result.ortog_pass == True:
rospy.loginfo("approach_door_result_cb -> result.ortog_pass == True")
return 'ortog_pass'
if result.middle_pass == True:
rospy.loginfo("approach_door_result_cb -> result.middle_pass == True")
return 'middle_pass'
if result.part_failed == True:
rospy.loginfo("approach_door_result_cb -> result.part_failed == True")
return 'part_failed'
return 'aborted'
rospy.loginfo("(approach_door_result_cb): This line should never be reached")
return 'aborted'
def main():
rospy.init_node('action_server_state_mashine')
sm0 = smach.StateMachine(outcomes=['succeeded', 'aborted', 'preempted'])
with sm0:
"""
smach.StateMachine.add('Pause', PauseState (),
transitions={'continue':'Approach door',
'abort' :'aborted'})
smach.StateMachine.add('Approach door',
smach_ros.SimpleActionState('ApproachDoorAS',
ApproachDoorAction,
goal = ApproachDoorGoal(),
result_cb = approach_door_result_cb,
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Pause'} )
"""
smach.StateMachine.add('Pause', PauseState (),
transitions={'continue':'Takeoff',
'abort' :'aborted'})
smach.StateMachine.add('PauseStateDebug', PauseStateDebug (),
transitions={'continue':'Move along',
'abort' :'aborted'})
smach.StateMachine.add('DebugState',
smach_ros.SimpleActionState('DebugStateAS',
DebugStateAction,
goal = DebugStateGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'aborted',
'succeeded':'Takeoff'} )
smach.StateMachine.add('Move along',
smach_ros.SimpleActionState('MoveAlongAS',
MoveAlongAction,
goal = MoveAlongGoal(vel=movement_speed),
result_cb = move_along_result_cb,
outcomes=['aborted', 'succeeded', 'wall_found', 'land_pad']),
transitions={'aborted' :'aborted',
'succeeded' :'Switch wall',
'wall_found':'Approach door',
'land_pad' :'Landing'} )
smach.StateMachine.add('Switch wall',
smach_ros.SimpleActionState('SwitchWallAS', SwitchWallAction,
goal = SwitchWallGoal(rght=1),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'aborted',
'succeeded':'Move along'} )
smach.StateMachine.add('Approach door',
smach_ros.SimpleActionState('ApproachDoorAS',
ApproachDoorAction,
goal = ApproachDoorGoal(),
result_cb = approach_door_result_cb,
outcomes=['aborted', 'succeeded', 'middle_pass', 'ortog_pass', 'part_failed']),
transitions={'aborted' :'Pause',
'ortog_pass' :'Switch side',
'middle_pass' :'Middle pass',
'part_failed' :'Move along',
'succeeded' :'Pass door'} )
smach.StateMachine.add('Pass door',
smach_ros.SimpleActionState('PassDoorAS',
PassDoorAction,
goal = PassDoorGoal(vel=-0.8),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Move along'} )
smach.StateMachine.add('Switch side',
smach_ros.SimpleActionState('SwitchSideAS',
SwitchSideAction,
goal = SwitchSideGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Approach wall'} )
smach.StateMachine.add('Approach wall',
smach_ros.SimpleActionState('ApproachWallAS',
ApproachWallAction,
goal = ApproachWallGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Move along'} )
smach.StateMachine.add('Middle pass',
smach_ros.SimpleActionState('MiddlePassAS',
MiddlePassAction,
goal = MiddlePassGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Move along'} )
smach.StateMachine.add('Takeoff',
smach_ros.SimpleActionState('TakeoffAS',
TakeoffAction,
goal = TakeoffGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'DebugState'} )
smach.StateMachine.add('Landing',
smach_ros.SimpleActionState('LandingAS',
LandingAction,
goal = LandingGoal(),
outcomes=['aborted', 'succeeded']),
transitions={'aborted' :'Pause',
'succeeded' :'Pause'} )
    # Create and start the introspection server
    # This is for debugging purposes
# sis = smach_ros.IntrospectionServer('introspection_server', sm0, '/STATE_MASHINE')
# sis.start()
# Execute SMACH plan
outcome = sm0.execute()
    print("State machine has finished with result ", outcome)
rospy.spin()
#sis.stop()
if __name__ == '__main__':
if not rospy.has_param('movement_speed'):
rospy.loginfo("movement_speed parameter was not specified. Terminating...")
exit()
movement_speed = rospy.get_param("movement_speed")
print "movement_speed =", movement_speed
main()
|
mit
|
frewsxcv/AutobahnPython
|
examples/twisted/wamp/longpoll/server.py
|
3
|
2559
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
import six
import datetime
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.endpoints import serverFromString
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.wamp import router, types
from autobahn.twisted.util import sleep
from autobahn.twisted import wamp, websocket
from autobahn.twisted.resource import WebSocketResource
from autobahn.twisted.longpoll import WampLongPollResource
class MyBackendComponent(wamp.ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
counter = 0
while True:
self.publish(u'com.myapp.topic1', counter)
print("Published event.")
counter += 1
yield sleep(2)
if __name__ == '__main__':
log.startLogging(sys.stdout)
router_factory = router.RouterFactory()
session_factory = wamp.RouterSessionFactory(router_factory)
component_config = types.ComponentConfig(realm = "realm1")
component_session = MyBackendComponent(component_config)
session_factory.add(component_session)
ws_factory = websocket.WampWebSocketServerFactory(session_factory, \
debug = False, \
debug_wamp = False)
ws_factory.startFactory()
ws_resource = WebSocketResource(ws_factory)
lp_resource = WampLongPollResource(session_factory, debug = True, debug_session_id = "kjmd3sBLOUnb3Fyr")
root = File(".")
root.putChild("ws", ws_resource)
root.putChild("lp", lp_resource)
web_factory = Site(root)
web_factory.noisy = False
server = serverFromString(reactor, "tcp:8080")
server.listen(web_factory)
reactor.run()
|
apache-2.0
|
ultrabox/Ultra.Stream
|
servers/moevideos.py
|
32
|
9669
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for moevideos
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def test_video_exists( page_url ):
logger.info("[moevideos.py] test_video_exists(page_url='%s')" % page_url)
    # If it is the embed code directly, it cannot be checked
if "video.php" in page_url:
return True,""
    # Does not exist / deleted: http://www.moevideos.net/online/27991
data = scrapertools.cache_page(page_url)
#logger.info("data="+data)
if "<span class='tabular'>No existe</span>" in data:
return False,"No existe o ha sido borrado de moevideos"
else:
        # Exists: http://www.moevideos.net/online/18998
patron = "<span class='tabular'>([^>]+)</span>"
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return True,""
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[moevideos.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
if page_url.startswith("http://www.moevideos.net/online"):
headers = []
headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
data = scrapertools.cache_page( page_url , headers=headers )
        # Download the script (useless for anything except the cookies)
headers.append(['Referer',page_url])
post = "id=1&enviar2=ver+video"
data = scrapertools.cache_page( page_url , post=post, headers=headers )
        ### Modified 12-6-2014
#code = scrapertools.get_match(data,'flashvars\="file\=([^"]+)"')
#<iframe width="860" height="440" src="http://moevideo.net/framevideo/16363.1856374b43bbd40c7f8d2b25b8e5?width=860&height=440" frameborder="0" allowfullscreen ></iframe>
code = scrapertools.get_match(data,'<iframe width="860" height="440" src="http://moevideo.net/framevideo/([^\?]+)\?width=860\&height=440" frameborder="0" allowfullscreen ></iframe>')
logger.info("code="+code)
else:
#http://moevideo.net/?page=video&uid=81492.8c7b6086f4942341aa1b78fb92df
code = scrapertools.get_match(page_url,"uid=([a-z0-9\.]+)")
    # letitbit API
headers2 = []
headers2.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
    ### Modified 12-6-2014
url = "http://api.letitbit.net"
#url = "http://api.moevideo.net"
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2272871%2E71f6541e64b0eda8da727a79424d%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2272871%2E71f6541e64b0eda8da727a79424d%22%7D%5D%5D"
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2212110%2E1424270cc192f8856e07d5ba179d%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2212110%2E1424270cc192f8856e07d5ba179d%22%7D%5D%5D
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2268653%2E669cbb12a3b9ebee43ce14425d9e%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2268653%2E669cbb12a3b9ebee43ce14425d9e%22%7D%5D%5D"
post = 'r=["tVL0gjqo5",["preview/flv_image",{"uid":"'+code+'"}],["preview/flv_link",{"uid":"'+code+'"}]]'
data = scrapertools.cache_page(url,headers=headers2,post=post)
logger.info("data="+data)
if ',"not_found"' in data:
return []
data = data.replace("\\","")
logger.info("data="+data)
patron = '"link"\:"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
video_url = matches[0]+"?ref=www.moevideos.net|User-Agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:15.0) Gecko/20100101 Firefox/15.0.1&Range=bytes:0-"
logger.info("[moevideos.py] video_url="+video_url)
video_urls = []
video_urls.append( [ scrapertools.get_filename_from_url(video_url)[-4:] + " [moevideos]",video_url ] )
for video_url in video_urls:
logger.info("[moevideos.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.moevideos.net/online/18998
patronvideos = 'moevideos.net/online/(\d+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://www.moevideos.net/online/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://www.moevideos.net/view/30086
patronvideos = 'moevideos.net/view/(\d+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://www.moevideos.net/online/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://moevideo.net/video.php?file=71845.7a9a6d72d6133bb7860375b63f0e&width=600&height=450
patronvideos = 'moevideo.net/video.php\?file\=([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http://www2.cinetux.org/moevideo.php?id=20671.29b19bfe3cfcf1c203816a78d1e8
patronvideos = 'cinetux.org/moevideo.php\?id\=([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http://moevideo.net/?page=video&uid=81492.8c7b6086f4942341aa1b78fb92df
patronvideos = 'moevideo.net/\?page\=video\&uid=([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http://moevideo.net/framevideo/95250.9c5a5f9faea7207a842d609e4913
patronvideos = 'moevideo.net/framevideo/([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http://moevideo.net/framevideo/95250.9c5a5f9faea7207a842d609e4913
patronvideos = 'moevideo.net/video/([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http://moevideo.net/swf/letplayerflx3.swf?file=23885.2b0a98945f7aa37acd1d6a0e9713
patronvideos = 'moevideo.net/swf/letplayerflx3.swf\?file\=([a-z0-9\.]+)'
logger.info("[moevideos.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[moevideos]"
url = "http://moevideo.net/?page=video&uid="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'moevideos' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
def test():
video_urls = get_video_url("http://www.moevideos.net/online/243989")
#video_urls = get_video_url("http://moevideo.net/?page=video&uid=60823.6717786f74cd87a6cbeeb8c9e48d")
return len(video_urls)>0
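A small usage sketch (not part of the original connector): example_usage and the page URL below are hypothetical and only illustrate how find_videos() and get_video_url() are meant to be chained.
def example_usage():
    # Fetch some listing page and look for embedded moevideos links.
    data = scrapertools.cache_page("http://www.example.com/listado")  # hypothetical URL
    for titulo, url, server in find_videos(data):
        # Resolve each detected page URL into playable media URLs.
        for label, media_url in get_video_url(url):
            logger.info("%s -> %s" % (label, media_url))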
|
gpl-2.0
|
DigitalPandacoin/pandacoin
|
test/functional/feature_config_args.py
|
1
|
2329
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "pandacoin.conf")
with open(conf_file, 'a', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
mit
|
metomi/rose
|
metomi/rosie/graph.py
|
1
|
9686
|
# Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Plot suite ancestry."""
import textwrap
import time
import pygraphviz
import metomi.rose.metadata_graph
import metomi.rose.opt_parse
import metomi.rose.reporter
import metomi.rosie.suite_id
import metomi.rosie.ws_client
import metomi.rosie.ws_client_cli
class NoConnectionsEvent(metomi.rose.reporter.Event):
"""An event raised if the graph has no edges or nodes.
event.args[0] is the filter id string.
"""
KIND = metomi.rose.reporter.Reporter.KIND_ERR
def __str__(self):
return "%s: no copy relationships to other suites" % self.args[0]
class PrintSuiteDetails(metomi.rose.reporter.Event):
"""An event to print out suite details when writing to CLI"""
KIND = metomi.rose.reporter.Reporter.KIND_OUT
def __str__(self):
template = " %s"
argslist = [self.args[0]]
if len(self.args) > 1:
for arg in self.args[1]:
template += ", %s"
argslist.append(arg)
return template % tuple(argslist)
def get_suite_data(prefix, properties=None):
"""Retrieve a dictionary containing the contents of RosieWS
Adds in any extra requested properties
"""
if properties is None:
properties = []
ws_client = metomi.rosie.ws_client.RosieWSClient(
prefixes=[prefix],
event_handler=metomi.rose.reporter.Reporter()
)
suite_data = ws_client.search(prefix, all_revs=1)[0][0]
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
suite_id = metomi.rosie.suite_id.SuiteId.from_idx_branch_revision(
dict_row["idx"],
dict_row["branch"],
dict_row["revision"]
)
dict_row["suite"] = suite_id.to_string_with_version()
if "local" in properties:
dict_row["local"] = suite_id.get_status()
if "date" in properties:
dict_row["date"] = time.strftime(
metomi.rosie.ws_client_cli.DATE_TIME_FORMAT,
time.gmtime(dict_row.get("date"))
)
return suite_data
def calculate_edges(graph, suite_data, filter_id=None, properties=None,
max_distance=None):
"""Get all connected suites for a prefix, optionally filtered."""
if properties is None:
properties = []
node_rosie_properties = {}
edges = []
forward_edges = {}
back_edges = {}
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
node_rosie_properties[idx] = []
for prop in properties:
node_rosie_properties[idx].append(dict_row.get(prop))
from_idx = dict_row.get("from_idx")
if from_idx is None:
continue
edges.append((from_idx, idx))
forward_edges.setdefault(from_idx, [])
forward_edges[from_idx].append(idx)
back_edges.setdefault(idx, [])
back_edges[idx].append(from_idx)
if filter_id is None:
# Plot all the edges we've found.
for edge in sorted(edges):
node0, node1 = edge
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(edge[0], edge[1])
else:
reporter = metomi.rose.reporter.Reporter()
# Only plot the connections involving filter_id.
node_stack = [(filter_id, 0)]
add_node(graph, filter_id, node_rosie_properties.get(filter_id),
fillcolor="lightgrey", style="filled")
ok_nodes = set([])
while node_stack:
node, distance = node_stack.pop()
if max_distance is not None and distance > max_distance:
continue
ok_nodes.add(node)
for neighbour_node in (forward_edges.get(node, []) +
back_edges.get(node, [])):
if neighbour_node not in ok_nodes:
node_stack.append((neighbour_node, distance + 1))
if len(ok_nodes) == 1:
# There are no related suites.
reporter(NoConnectionsEvent(filter_id))
for edge in sorted(edges):
node0, node1 = edge
if node0 in ok_nodes and node1 in ok_nodes:
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(node0, node1)
def add_node(graph, node, node_label_properties, **kwargs):
"""Add a node with a particular label."""
label_lines = [node]
if node_label_properties is not None:
for property_value in node_label_properties:
label_lines.extend(textwrap.wrap(str(property_value)))
label_text = "\\n".join(label_lines) # \n must be escaped for graphviz.
kwargs.update({"label": label_text})
graph.add_node(node, **kwargs)
def make_graph(suite_data, filter_id, properties, prefix, max_distance=None):
"""Construct the pygraphviz graph."""
graph = pygraphviz.AGraph(directed=True)
graph.graph_attr["rankdir"] = "LR"
if filter_id:
graph.graph_attr["name"] = filter_id + " copy tree"
else:
graph.graph_attr["name"] = prefix + " copy tree"
calculate_edges(graph, suite_data, filter_id, properties,
max_distance=max_distance)
return graph
def output_graph(graph, filename=None, debug_mode=False):
"""Draw the graph to filename (or temporary file if None)."""
metomi.rose.metadata_graph.output_graph(graph, debug_mode=debug_mode,
filename=filename)
def print_graph(suite_data, filter_id, properties=None, max_distance=None):
"""Dump out list of graph entries relating to a suite"""
if properties is None:
properties = []
reporter = metomi.rose.reporter.Reporter()
ancestry = {}
# Process suite_data to get ancestry tree
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
from_idx = dict_row.get("from_idx")
if idx not in ancestry:
ancestry[idx] = {'parent': None, 'children': []}
if from_idx:
ancestry[idx]['parent'] = from_idx
for prop in properties:
ancestry[idx][prop] = dict_row.get(prop)
if from_idx in ancestry:
ancestry[from_idx]['children'].append(idx)
else:
ancestry[from_idx] = {'parent': None, 'children': [idx]}
# Print out info
parent_id = ancestry[filter_id]['parent']
if parent_id:
reporter(PrintSuiteDetails(
parent_id, [ancestry[parent_id][p] for p in properties]),
prefix="[parent]")
else:
reporter(PrintSuiteDetails(None), prefix="[parent]")
children = ancestry[filter_id]['children']
generation = 1
# Print out each generation of child suites
while children:
next_children = []
for child in children:
reporter(PrintSuiteDetails(child,
[ancestry[child][p] for p in properties]),
prefix="[child%s]" % generation)
# If a child has children add to list of next generation children
if ancestry[child]['children']:
next_children += ancestry[child]['children']
if max_distance and generation >= max_distance:
break
generation += 1
children = next_children
def main():
"""Provide the CLI interface."""
opt_parser = metomi.rose.opt_parse.RoseOptionParser()
opt_parser.add_my_options("distance",
"output_file",
"prefix",
"property",
"text")
opts, args = opt_parser.parse_args()
filter_id = None
if args:
filter_id = args[0]
prefix = metomi.rosie.suite_id.SuiteId(id_text=filter_id).prefix
if opts.prefix:
opt_parser.error("No need to specify --prefix when specifying ID")
elif opts.prefix:
prefix = opts.prefix
else:
prefix = metomi.rosie.suite_id.SuiteId.get_prefix_default()
if opts.distance and not args:
opt_parser.error("distance option requires an ID")
if opts.text and not args:
opt_parser.error("print option requires an ID")
suite_data = get_suite_data(prefix, opts.property)
if opts.text:
print_graph(suite_data, filter_id, opts.property,
max_distance=opts.distance)
else:
graph = make_graph(suite_data, filter_id, opts.property, prefix,
max_distance=opts.distance)
output_graph(graph, filename=opts.output_file,
debug_mode=opts.debug_mode)
if __name__ == "__main__":
main()
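A short programmatic sketch of the same flow that main() wires up from the CLI; the prefix "myorg" and the suite id "mo1-aa001" are placeholders, not real Rosie identifiers.
def example_usage():
    properties = ["owner", "title"]
    # Pull all revisions for the prefix, then graph the ancestry around one suite.
    suite_data = get_suite_data("myorg", properties)
    graph = make_graph(suite_data, "mo1-aa001", properties, "myorg",
                       max_distance=2)
    output_graph(graph, filename="ancestry.svg")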
|
gpl-3.0
|
umuzungu/zipline
|
zipline/pipeline/loaders/equity_pricing_loader.py
|
3
|
4665
|
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import (
iinfo,
uint32,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
SQLiteAdjustmentReader,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.errors import NoFurtherDataError
from .base import PipelineLoader
UINT32_MAX = iinfo(uint32).max
class USEquityPricingLoader(PipelineLoader):
"""
PipelineLoader for US Equity Pricing data
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
# HACK: Pull the calendar off our raw_price_loader so that we can
# backshift dates.
self._calendar = self.raw_price_loader._calendar
self.adjustments_loader = adjustments_loader
@classmethod
def from_files(cls, pricing_path, adjustments_path):
"""
Create a loader from a bcolz equity pricing dir and a SQLite
adjustments path.
Parameters
----------
pricing_path : str
Path to a bcolz directory written by a BcolzDailyBarWriter.
adjustments_path : str
Path to an adjustments db written by a SQLiteAdjustmentWriter.
"""
return cls(
BcolzDailyBarReader(pricing_path),
SQLiteAdjustmentReader(adjustments_path)
)
def load_adjusted_array(self, columns, dates, assets, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = _shift_dates(
self._calendar, dates[0], dates[-1], shift=1,
)
colnames = [c.name for c in columns]
raw_arrays = self.raw_price_loader.load_raw_arrays(
colnames,
start_date,
end_date,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
colnames,
dates,
assets,
)
out = {}
for c, c_raw, c_adjs in zip(columns, raw_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
mask,
c_adjs,
c.missing_value,
)
return out
def _shift_dates(dates, start_date, end_date, shift):
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
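A minimal construction sketch, assuming the two paths point at artifacts produced by zipline's BcolzDailyBarWriter and SQLiteAdjustmentWriter; both paths are placeholders.
def example_build_loader():
    loader = USEquityPricingLoader.from_files(
        "/data/daily_equity_pricing.bcolz",  # hypothetical bcolz directory
        "/data/adjustments.sqlite",          # hypothetical adjustments db
    )
    # The loader is then handed to a pipeline engine wherever a PipelineLoader
    # for USEquityPricing columns is expected.
    return loader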
|
apache-2.0
|
yigitguler/django
|
tests/admin_views/admin.py
|
14
|
28973
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import tempfile
import os
from django import forms
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.core.servers.basehttp import FileWrapper
from django.conf.urls import url
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.contrib.admin import BooleanFieldListFilter
from django.utils.safestring import mark_safe
from django.utils.six import StringIO
from .models import (Article, Chapter, Child, Parent, Picture, Widget,
DooHickey, Grommet, Whatsit, FancyDoodad, Category, Link,
PrePopulatedPost, PrePopulatedSubPost, CustomArticle, Section,
ModelWithStringPrimaryKey, Color, Thing, Actor, Inquisition, Sketch,
Person, Persona, FooAccount, BarAccount, Subscriber, ExternalSubscriber,
OldSubscriber, Podcast, Vodcast, EmptyModel, Fabric, Gallery, Language,
Recommendation, Recommender, Collector, Post, Gadget, Villain,
SuperVillain, Plot, PlotDetails, CyclicOne, CyclicTwo, WorkHour,
Reservation, FoodDelivery, RowLevelChangePermissionModel, Paper,
CoverLetter, Story, OtherStory, Book, Promo, ChapterXtra1, Pizza, Topping,
Album, Question, Answer, ComplexSortedPerson, PluggableSearchPerson,
PrePopulatedPostLargeSlug, AdminOrderedField, AdminOrderedModelMethod,
AdminOrderedAdminMethod, AdminOrderedCallable, Report, Color2,
UnorderedObject, MainPrepopulated, RelatedPrepopulated, UndeletableObject,
UnchangeableObject, UserMessenger, Simple, Choice, ShortMessage, Telegram,
FilteredManager, EmptyModelHidden, EmptyModelVisible, EmptyModelMixin,
State, City, Restaurant, Worker, ParentWithDependentChildren,
DependentChild, StumpJoke, FieldOverridePost, FunkyTag,
ReferencedByParent, ChildOfReferer, M2MReference, ReferencedByInline,
InlineReference, InlineReferer, Ingredient)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('content', 'date', callable_year, 'model_year',
'modeladmin_year', 'model_year_reversed')
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected')
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request,
formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'from@example.com',
['to@example.com']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'from@example.com',
['to@example.com']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super(ParentAdmin, self).save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo"
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
multiline_html.allow_tags = True
value.short_description = 'Value in $US'
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.filter(pk=9999) # Does not exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances.
Note that the CoverLetter model defines a __unicode__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances.
Note that the Telegram model defines a __unicode__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return '<span style="color: #%s;">%s</span>' % ('ff00ff', obj.name)
colored_name.allow_tags = True
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(request, queryset, search_term)
try:
search_term_as_int = int(search_term)
queryset |= self.model.objects.filter(age=search_term_as_int)
except ValueError:
pass
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
return [p for p in urlpatterns if not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(M2MReference)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as to exercise all four troublesome (w.r.t escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Ingredient)
# Register core models we need in our tests
from django.contrib.auth.models import User, Group
from django.contrib.auth.admin import UserAdmin, GroupAdmin
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
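A sketch of how a test URLconf might expose the admin sites defined above; the URL prefixes are illustrative assumptions.
# Hypothetical urls.py wiring for the custom admin sites in this module.
from django.conf.urls import include, url
urlpatterns = [
    url(r'^test_admin/admin/', include(site.urls)),
    url(r'^test_admin/admin7/', include(site7.urls)),
]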
|
bsd-3-clause
|
fernandog/Medusa
|
lib/send2trash/plat_win.py
|
79
|
1660
|
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from __future__ import unicode_literals
from ctypes import windll, Structure, byref, c_uint
from ctypes.wintypes import HWND, UINT, LPCWSTR, BOOL
import os.path as op
from .compat import text_type
shell32 = windll.shell32
SHFileOperationW = shell32.SHFileOperationW
class SHFILEOPSTRUCTW(Structure):
_fields_ = [
("hwnd", HWND),
("wFunc", UINT),
("pFrom", LPCWSTR),
("pTo", LPCWSTR),
("fFlags", c_uint),
("fAnyOperationsAborted", BOOL),
("hNameMappings", c_uint),
("lpszProgressTitle", LPCWSTR),
]
FO_MOVE = 1
FO_COPY = 2
FO_DELETE = 3
FO_RENAME = 4
FOF_MULTIDESTFILES = 1
FOF_SILENT = 4
FOF_NOCONFIRMATION = 16
FOF_ALLOWUNDO = 64
FOF_NOERRORUI = 1024
def send2trash(path):
if not isinstance(path, text_type):
path = text_type(path, 'mbcs')
if not op.isabs(path):
path = op.abspath(path)
fileop = SHFILEOPSTRUCTW()
fileop.hwnd = 0
fileop.wFunc = FO_DELETE
fileop.pFrom = LPCWSTR(path + '\0')
fileop.pTo = None
fileop.fFlags = FOF_ALLOWUNDO | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_SILENT
fileop.fAnyOperationsAborted = 0
fileop.hNameMappings = 0
fileop.lpszProgressTitle = None
result = SHFileOperationW(byref(fileop))
if result:
msg = "Couldn't perform operation. Error code: %d" % result
raise OSError(msg)
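A minimal usage sketch; the file path below is a placeholder.
if __name__ == '__main__':
    # Move a file to the Recycle Bin instead of deleting it permanently.
    send2trash(r'C:\temp\old_report.txt')  # hypothetical path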
|
gpl-3.0
|
40223139/LEGOg7-39
|
man.py
|
61
|
21781
|
import cherrypy
# This is the definition of the MAN class
'''
# Import the submodule in the application
import programs.cdag30.man as cdag30_man
# Add man.py under the cdag30 module, mapping the submodule man to its MAN() class
root.cdag30.man = cdag30_man.MAN()
# Once this is set up, you can use
/cdag30/man/assembly
# to call the assembly method of the MAN class in man.py
'''
class MAN(object):
# Each group uses index to direct the program flow that follows
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
這是 2014CDA 協同專案下的 cdag30 模組下的 MAN 類別.<br /><br />
<!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
<a href="assembly">執行 MAN 類別中的 assembly 方法</a><br /><br />
請確定下列零件於 V:/home/lego/man 目錄中, 且開啟空白 Creo 組立檔案.<br />
<a href="/static/lego_man.7z">lego_man.7z</a>(滑鼠右鍵存成 .7z 檔案)<br />
'''
return outstring
@cherrypy.expose
def assembly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
</head>
<body>
</script><script language="JavaScript">
/* Define a part-assembly function */
// featID is the id of the first assembled component in the assembly
// inc is the assembly-order index of part1; the first part placed into the assembly is featID+0
// part2 is the name of the additional part to assemble
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
var relation = new Array (pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
// End of the axis_plane_assembly() function
//
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
// End of the three_plane_assembly() function
//
// If the operating system running Creo is not Windows,
if (!pfcIsWindows())
// enable the corresponding UniversalXPConnect privilege (the equivalent of ActiveX on Windows)
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() is a function in pfcUtils.js; it makes sure this JavaScript is running in the embedded browser
var session = pfcGetProESession();
// Set the config option so the built-in placement assumptions of the component assembly workflow are not used
session.SetConfigOption("comp_placement_assumptions","no");
// Create the placement matrix for the parts; Pro/Web.Link variables cannot be created directly and must be created through pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// Build the identity placement matrix
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set(x, y, 1.0);
else
identityMatrix.Set(x, y, 0.0);
}
// Use identityMatrix to create the transf coordinate transformation matrix
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the currently opened, empty assembly file as the model
var model = session.CurrentModel;
// Check that a model exists and that it is an assembly; throw an error otherwise
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
// Treat this model as the assembly object
var assembly = model;
/**---------------------- LEGO_BODY--------------------**/
// Set up the descriptor object for the part
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/man/LEGO_BODY.prt");
// If the part is already in the session, use it directly
var componentModel = session.GetModelFromDescr(descr);
// If the part is not in the session, load it into the session from the working directory
var componentModel = session.RetrieveModel(descr);
// Once the part is in the session, place it into the assembly file
if (componentModel != void null)
{
// Note that this asmcomp is the object on which the constraints will be set
// asmcomp is a feature object; it places the part directly into the assembly with the orientation given by the transf matrix
var asmcomp = assembly.AssembleComponent(componentModel, transf);
}
// Create the constraints variable
var constrs = pfcCreate("pfcComponentConstraints");
// Set the three datum planes of the assembly file; note the built-in names are ASM_FRONT etc., not ASM_D_FRONT as in Pro/E WF; the datum plane names can be checked under assembly->info->model
// The datum names in the assembly file can also be checked via View->plane tag display
// Build the array of assembly reference planes
var asmDatums = new Array("ASM_FRONT", "ASM_TOP", "ASM_RIGHT");
// Set the three datum planes of the part file; the names are the same as in Pro/E WF
var compDatums = new Array("FRONT", "TOP", "RIGHT");
// Create the ids variable; intseq is a sequence-of-integers data type whose elements are accessed by integer index, starting at 0
// (intseq is roughly the equivalent of a Python sequence)
var ids = pfcCreate("intseq");
// Use the assembly model object to create the path variable
var path = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
// Variables used for interactive selection; MpfcSelect is one of the module-level classes
var MpfcSelect = pfcCreate("MpfcSelect");
// Loop over the three datum planes to constrain the assembly and the part
for (var i = 0; i < 3; i++)
{
// Set the assembly reference plane, i.e. one of the three datum planes "ASM_FRONT", "ASM_TOP", "ASM_RIGHT"
var asmItem = assembly.GetItemByName (pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
// If there is no matching assembly reference plane, set the flag for the interactive plane-selection form
if (asmItem == void null)
{
interactFlag = true;
continue;
}
// Set the part reference plane, i.e. one of the three datum planes "FRONT", "TOP", "RIGHT"
var compItem = componentModel.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
// If there is no matching part reference plane, set the flag for the interactive plane-selection form
if (compItem == void null)
{
interactFlag = true;
continue;
}
// Because asmItem is a datum feature inside the assembly, it must be selected through path
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, path);
// compItem belongs to the part and has no path, so the second argument is null
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
// Create the constraint using the ASM_CONSTRAINT_ALIGN alignment constraint
var constr = pfcCreate("pfcComponentConstraint").Create (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
// Set the assembly reference and component reference selections for the constraint
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
// The first argument is the force flag, the second the ignore flag
// force = false means the constraint is not forced; it only needs to be true when aligning through points or axes
// ignore controls whether the constraint is ignored when the model is regenerated; false means it is not ignored
// When assembling a closed-chain mechanism, ignore usually has to be true for the constraints to succeed
// Because the three planes fully constrain the part, the arguments here are false, false
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
// Append the selection data to the constraints variable
constrs.Append(constr);
}
// Apply the assembly constraints
asmcomp.SetConstraints (constrs, void null);
/**---------------------- LEGO_ARM_RT right upper arm --------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/LEGO_ARM_RT.prt");
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
// Note that this asmcomp is the object on which the constraints will be set
// asmcomp is a feature object; it places the part directly into the assembly with the transf coordinate transform
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
// Get the component ids under the assembly; since there is only one part so far, take its featID with index 0
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// This featID is the id of the first part in the assembly, i.e. the LEGO figure's body
var featID = components.Item(0).Id;
ids.Append(featID);
// Create the path corresponding to the sub-component inside the assembly model
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
// Below, align the body's A_13 axis with the right arm's A_4 axis and mate the body's DTM1 with the arm's DTM1
var asmDatums = new Array("A_13", "DTM1");
var compDatums = new Array("A_4", "DTM1");
// The assembly relations are align and mate
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
// The referenced items are an axis and a surface
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
// Create the constraints variable; the axes are aligned and the datum planes are mated
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
// 設定組立參考面, asmItem 為 model item
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
// 若無對應的組立參考面, 則啟用互動式平面選擇表單 flag
if (asmItem == void null)
{
interactFlag = true;
continue;
}
// 設定零件參考面, compItem 為 model item
var compItem = componentModel.GetItemByName (relationItem[i], compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
// 採用互動式設定相關的變數
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
// 因為透過軸線對齊, 第一 force 變數需設為 true
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
// 將互動選擇相關資料, 附加在程式約束變數之後
constrs.Append(constr);
}
// 設定組立約束條件, 以 asmcomp 特徵進行約束條件設定
// 請注意, 第二個變數必須為 void null 表示零件對零件進行約束, 若為 subPath, 則零件會與原始零件的平面進行約束
asmcomp.SetConstraints (constrs, void null);
/**---------------------- LEGO_ARM_LT left upper arm --------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/LEGO_ARM_LT.prt");
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
// Note: this asmcomp is the object on which the constraints will be set
// asmcomp is a feature object; it places the part directly into the assembly file with the transf coordinate transformation
var asmcomp = assembly.AssembleComponent(componentModel, transf);
}
// Get the component ids under the assembly; use index 0 to take the featID of the first part
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
var ids = pfcCreate ("intseq");
// The left arm is also constrained against the body, so take the body's featID
// By this point the right arm's id should be featID+1 and the left arm's featID+2
ids.Append(featID);
// Build the path to the child component in the assembly model
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array("A_9", "DTM2");
var compDatums = new Array("A_4", "DTM1");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
// Create the constraints variable
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
// Get the assembly reference; asmItem is a model item
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
// If there is no matching assembly reference, set the flag that enables the interactive plane-selection form
if (asmItem == void null)
{
interactFlag = true;
continue;
}
// Get the part reference; compItem is a model item
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
// Set up the variables used for interactive selection
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
// Append the constraint selection data to the constraints collection
constrs.Append(constr);
}
// Apply the assembly constraints on the asmcomp feature
// Note: the second argument must be void null so the part is constrained part-to-part; if subPath were passed, the part would be constrained to the planes of the original part
asmcomp.SetConstraints (constrs, void null);
/**---------------------- LEGO_HAND right hand --------------------**/
// Right upper arm LEGO_ARM_RT.prt datums: A_2, DTM2
// Right hand LEGO_HAND.prt datums: A_1, DTM3
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/LEGO_HAND.prt");
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
// Note: this asmcomp is the object on which the constraints will be set
// asmcomp is a feature object; it places the part directly into the assembly file with the transf coordinate transformation
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
// Get the component ids under the assembly; use index 0 to take the featID of the first part
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
var ids = pfcCreate ("intseq");
// In the assembly, LEGO_BODY.prt has id featID
// LEGO_ARM_RT.prt is the second part placed into the assembly, so its id is featID+1
ids.Append(featID+1);
// Build the path to the child component in the assembly model from its id
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
// Align LEGO_ARM_RT's A_2 axis with the hand's A_1 axis, and mate its DTM2 datum plane with the hand's DTM3
var asmDatums = new Array("A_2", "DTM2");
var compDatums = new Array("A_1", "DTM3");
// The assembly relations are align and mate
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
// The referenced items are an axis and a surface
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
// Create the constraints variable; the axes are aligned and the datum planes are mated
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
// Get the assembly reference; asmItem is a model item
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
// If there is no matching assembly reference, set the flag that enables the interactive plane-selection form
if (asmItem == void null)
{
interactFlag = true;
continue;
}
// Get the part reference; compItem is a model item
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
// Set up the variables used for interactive selection
var MpfcSelect = pfcCreate("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
// Because the alignment goes through an axis, the first (force) argument must be true
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
// Append the constraint selection data to the constraints collection
constrs.Append(constr);
}
// Apply the assembly constraints on the asmcomp feature
// Note: the second argument must be void null so the part is constrained part-to-part; if subPath were passed, the part would be constrained to the planes of the original part
asmcomp.SetConstraints (constrs, void null);
// Assemble the left HAND by calling the helper function
axis_plane_assembly(session, assembly, transf, featID, 2,
"LEGO_HAND.prt", "A_2", "DTM2", "A_1", "DTM3");
// Assemble the figure's HEAD by calling the helper function
// The BODY id is featID+0; it is constrained using A_2 and DTM3
// The HEAD is referenced directly by file name and constrained using A_2 and DTM2
axis_plane_assembly(session, assembly, transf, featID, 0,
"LEGO_HEAD.prt", "A_2", "DTM3", "A_2", "DTM2");
// The body and the WAIST are assembled with three plane constraints
// The body's assembly planes are DTM4, DTM5, DTM6
// The WAIST's assembly planes are DTM1, DTM2, DTM3
three_plane_assembly(session, assembly, transf, featID, 0, "LEGO_WAIST.prt", "DTM4", "DTM5", "DTM6", "DTM1", "DTM2", "DTM3");
// Right leg
axis_plane_assembly(session, assembly, transf, featID, 6,
"LEGO_LEG_RT.prt", "A_8", "DTM4", "A_10", "DTM1");
// Left leg
axis_plane_assembly(session, assembly, transf, featID, 6,
"LEGO_LEG_LT.prt", "A_8", "DTM5", "A_10", "DTM1");
// Red hat
axis_plane_assembly(session, assembly, transf, featID, 5,
"LEGO_HAT.prt", "A_2", "TOP", "A_2", "FRONT");
// Regenerate and repaint the assembly file
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring
|
agpl-3.0
|
jaywink/federation
|
federation/utils/network.py
|
2
|
8856
|
import calendar
import datetime
import logging
import re
import socket
from typing import Optional, Dict
from urllib.parse import quote
from uuid import uuid4
import requests
from requests.exceptions import RequestException, HTTPError, SSLError
from requests.exceptions import ConnectionError
from requests.structures import CaseInsensitiveDict
from federation import __version__
logger = logging.getLogger("federation")
USER_AGENT = "python/federation/%s" % __version__
def fetch_content_type(url: str) -> Optional[str]:
"""
Fetch the HEAD of the remote url to determine the content type.
"""
try:
response = requests.head(url, headers={'user-agent': USER_AGENT}, timeout=10)
except RequestException as ex:
logger.warning("fetch_content_type - %s when fetching url %s", ex, url)
else:
return response.headers.get('Content-Type')
def fetch_document(url=None, host=None, path="/", timeout=10, raise_ssl_errors=True, extra_headers=None):
"""Helper method to fetch remote document.
Must be given either the ``url`` or ``host``.
If ``url`` is given, only that will be tried without falling back to http from https.
If ``host`` given, `path` will be added to it. Will fall back to http on non-success status code.
:arg url: Full url to fetch, including protocol
:arg host: Domain part only without path or protocol
:arg path: Path without domain (defaults to "/")
:arg timeout: Seconds to wait for response (defaults to 10)
:arg raise_ssl_errors: Pass False if you want to try HTTP even for sites with SSL errors (default True)
:arg extra_headers: Optional extra headers dictionary to add to requests
:returns: Tuple of document (str or None), status code (int or None) and error (an exception class instance or None)
:raises ValueError: If neither url nor host are given as parameters
"""
if not url and not host:
raise ValueError("Need url or host.")
logger.debug("fetch_document: url=%s, host=%s, path=%s, timeout=%s, raise_ssl_errors=%s",
url, host, path, timeout, raise_ssl_errors)
headers = {'user-agent': USER_AGENT}
if extra_headers:
headers.update(extra_headers)
if url:
# Use url since it was given
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
# Build url with some little sanitizing
host_string = host.replace("http://", "").replace("https://", "").strip("/")
path_string = path if path.startswith("/") else "/%s" % path
url = "https://%s%s" % (host_string, path_string)
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except (HTTPError, SSLError, ConnectionError) as ex:
if isinstance(ex, SSLError) and raise_ssl_errors:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
# Try http then
url = url.replace("https://", "http://")
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
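# Illustrative usage sketch (not part of the original module): a minimal example of
# calling fetch_document with a host and path; the host below is hypothetical.
def _example_fetch_document():
    document, status_code, error = fetch_document(host="example.com", path="/.well-known/nodeinfo")
    if error:
        logger.warning("example fetch failed: %s", error)
        return None
    logger.info("example fetch succeeded with status %s", status_code)
    return document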
def fetch_host_ip(host: str) -> str:
"""
Fetch ip by host
"""
try:
ip = socket.gethostbyname(host)
except socket.gaierror:
return ''
return ip
def fetch_file(url: str, timeout: int = 30, extra_headers: Dict = None) -> str:
"""
Download a file with a temporary name and return the name.
"""
headers = {'user-agent': USER_AGENT}
if extra_headers:
headers.update(extra_headers)
response = requests.get(url, timeout=timeout, headers=headers, stream=True)
response.raise_for_status()
name = f"/tmp/{str(uuid4())}"
with open(name, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
return name
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
Implementation copied from Django.
https://github.com/django/django/blob/master/django/utils/http.py#L157
License: BSD 3-clause
"""
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
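# Illustrative usage sketch (not part of the original module): parse_http_date on the
# classic RFC example date returns seconds since the epoch, in UTC.
def _example_parse_http_date():
    assert parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') == 784111777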
def send_document(url, data, timeout=10, method="post", *args, **kwargs):
"""Helper method to send a document via POST.
Additional ``*args`` and ``**kwargs`` will be passed on to ``requests.post``.
:arg url: Full url to send to, including protocol
:arg data: Dictionary (will be form-encoded), bytes, or file-like object to send in the body
:arg timeout: Seconds to wait for response (defaults to 10)
:arg method: Method to use, defaults to post
:returns: Tuple of status code (int or None) and error (exception class instance or None)
"""
logger.debug("send_document: url=%s, data=%s, timeout=%s, method=%s", url, data, timeout, method)
if not method:
method = "post"
headers = CaseInsensitiveDict({
'User-Agent': USER_AGENT,
})
if "headers" in kwargs:
# Update from kwargs
headers.update(kwargs.get("headers"))
kwargs.update({
"data": data, "timeout": timeout, "headers": headers
})
request_func = getattr(requests, method)
try:
response = request_func(url, *args, **kwargs)
logger.debug("send_document: response status code %s", response.status_code)
return response.status_code, None
# TODO support rate limit 429 code
except RequestException as ex:
logger.debug("send_document: exception %s", ex)
return None, ex
def try_retrieve_webfinger_document(handle: str) -> Optional[str]:
"""
Try to retrieve an RFC7033 webfinger document. Does not raise if it fails.
"""
try:
host = handle.split("@")[1]
except AttributeError:
logger.warning("retrieve_webfinger_document: invalid handle given: %s", handle)
return None
document, code, exception = fetch_document(
host=host, path="/.well-known/webfinger?resource=acct:%s" % quote(handle),
)
if exception:
logger.debug("retrieve_webfinger_document: failed to fetch webfinger document: %s, %s", code, exception)
return document
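# Illustrative usage sketch (not part of the original module): look up the webfinger
# document for a handle; the handle below is hypothetical.
def _example_webfinger_lookup():
    document = try_retrieve_webfinger_document("user@example.com")
    if document is None:
        logger.info("example webfinger lookup returned nothing")
    return document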
|
bsd-3-clause
|
TeamEOS/external_chromium_org
|
components/test/data/autofill/merge/tools/flatten.py
|
162
|
2132
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
def main():
"""Converts a vertical serialization into a compact, horizontal serialization.
"""
COLUMNS = ['First name', 'Middle name', 'Last name', 'Email', 'Company name',
'Address line 1', 'Address line 2', 'City', 'State', 'ZIP code',
'Country', 'Phone']
if len(sys.argv) != 2:
print "Usage: python flatten.py <path/to/serialized_profiles>"
return
profiles = [COLUMNS]
with open(sys.argv[1], 'r') as serialized_profiles:
profile = []
previous_field_type = ''
for line in serialized_profiles:
# Trim the newline if present.
if line[-1] == '\n':
line = line[:-1]
if line == "---":
if len(profile):
# Reached the end of a profile.
# Save the current profile and prepare to build up the next one.
profiles.append(profile)
profile = []
else:
# Append the current field's value to the current profile.
line_parts = line.split(': ', 1)
field_type = line_parts[0]
field_value = line_parts[1]
if field_type != previous_field_type:
profile.append("'%s'" % field_value)
else:
# This is a non-primary value for a multi-valued field.
profile[-1] += ", '%s'" % field_value
previous_field_type = field_type
if len(profile):
profiles.append(profile)
# Prepare format strings so that we can align the contents of each column.
transposed = zip(*profiles)
column_widths = []
for column in transposed:
widths = [len(value) for value in column]
column_widths.append(max(widths))
column_formats = ["{0:<" + str(width) + "}" for width in column_widths]
for profile in profiles:
profile_format = zip(column_formats, profile)
profile = [format_.format(value) for (format_, value) in profile_format]
print " | ".join(profile)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
eemirtekin/edx-platform
|
common/djangoapps/track/tests/test_util.py
|
239
|
1203
|
from datetime import datetime
import json
from pytz import UTC
from django.test import TestCase
from track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase):
def test_datetime_encoding(self):
a_naive_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000)
a_tz_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000, tzinfo=UTC)
a_date = a_naive_datetime.date()
an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
an_iso_date = '2012-05-01'
obj = {
'number': 100,
'string': 'hello',
'object': {'a': 1},
'a_datetime': a_naive_datetime,
'a_tz_datetime': a_tz_datetime,
'a_date': a_date,
}
to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
from_json = json.loads(to_json)
self.assertEqual(from_json['number'], 100)
self.assertEqual(from_json['string'], 'hello')
self.assertEqual(from_json['object'], {'a': 1})
self.assertEqual(from_json['a_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_tz_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_date'], an_iso_date)
|
agpl-3.0
|
mach0/QGIS
|
tests/src/python/test_qgscoordinatetransform.py
|
15
|
9018
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsCoordinateTransform.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2012 by Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsRectangle,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsCoordinateTransformContext,
QgsDatumTransform,
QgsProject,
QgsProjUtils)
from qgis.testing import start_app, unittest
start_app()
class TestQgsCoordinateTransform(unittest.TestCase):
def testTransformBoundingBox(self):
"""Test that we can transform a rectangular bbox from utm56s to LonLat"""
myExtent = QgsRectangle(242270, 6043737, 246330, 6045897)
myGeoCrs = QgsCoordinateReferenceSystem('EPSG:4326')
myUtmCrs = QgsCoordinateReferenceSystem('EPSG:32756')
myXForm = QgsCoordinateTransform(myUtmCrs, myGeoCrs, QgsProject.instance())
myProjectedExtent = myXForm.transformBoundingBox(myExtent)
myExpectedExtent = ('150.1509239873580270,-35.7176936443908772 : '
'150.1964384662953194,-35.6971885216629090')
myExpectedValues = [150.1509239873580270, -35.7176936443908772,
150.1964384662953194, -35.6971885216629090]
myMessage = ('Expected:\n%s\nGot:\n%s\n' %
(myExpectedExtent,
myProjectedExtent.toString()))
self.assertAlmostEqual(myExpectedValues[0], myProjectedExtent.xMinimum(), msg=myMessage)
self.assertAlmostEqual(myExpectedValues[1], myProjectedExtent.yMinimum(), msg=myMessage)
self.assertAlmostEqual(myExpectedValues[2], myProjectedExtent.xMaximum(), msg=myMessage)
self.assertAlmostEqual(myExpectedValues[3], myProjectedExtent.yMaximum(), msg=myMessage)
def testTransformBoundingBoxSizeOverflowProtection(self):
"""Test transform bounding box size overflow protection (github issue #32302)"""
extent = QgsRectangle(-176.0454709164556562, 89.9999999999998153, 180.0000000000000000, 90.0000000000000000)
transform = d = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:4236'), QgsCoordinateReferenceSystem('EPSG:3031'), QgsProject.instance())
# this test checks that the line below doesn't assert and crash
transformedExtent = transform.transformBoundingBox(extent)
def testTransformQgsRectangle_Regression17600(self):
"""Test that rectangle transform is in the bindings"""
myExtent = QgsRectangle(-1797107, 4392148, 6025926, 6616304)
myGeoCrs = QgsCoordinateReferenceSystem('EPSG:4326')
myUtmCrs = QgsCoordinateReferenceSystem('EPSG:3857')
myXForm = QgsCoordinateTransform(myUtmCrs, myGeoCrs, QgsProject.instance())
myTransformedExtent = myXForm.transform(myExtent)
myTransformedExtentForward = myXForm.transform(myExtent, QgsCoordinateTransform.ForwardTransform)
self.assertAlmostEquals(myTransformedExtentForward.xMaximum(), myTransformedExtent.xMaximum())
self.assertAlmostEquals(myTransformedExtentForward.xMinimum(), myTransformedExtent.xMinimum())
self.assertAlmostEquals(myTransformedExtentForward.yMaximum(), myTransformedExtent.yMaximum())
self.assertAlmostEquals(myTransformedExtentForward.yMinimum(), myTransformedExtent.yMinimum())
self.assertAlmostEquals(myTransformedExtentForward.xMaximum(), 54.13181426773211)
self.assertAlmostEquals(myTransformedExtentForward.xMinimum(), -16.14368685298181)
self.assertAlmostEquals(myTransformedExtentForward.yMaximum(), 50.971783118386895)
self.assertAlmostEquals(myTransformedExtentForward.yMinimum(), 36.66235970825241)
myTransformedExtentReverse = myXForm.transform(myTransformedExtent, QgsCoordinateTransform.ReverseTransform)
self.assertAlmostEquals(myTransformedExtentReverse.xMaximum(), myExtent.xMaximum())
self.assertAlmostEquals(myTransformedExtentReverse.xMinimum(), myExtent.xMinimum())
self.assertAlmostEquals(myTransformedExtentReverse.yMaximum(), myExtent.yMaximum())
self.assertAlmostEquals(myTransformedExtentReverse.yMinimum(), myExtent.yMinimum())
def testContextProj6(self):
"""
Various tests to ensure that datum transforms are correctly set respecting context
"""
context = QgsCoordinateTransformContext()
context.addCoordinateOperation(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:4283'),
'proj')
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28354'), QgsCoordinateReferenceSystem('EPSG:28353'), context)
self.assertEqual(list(transform.context().coordinateOperations().keys()), [('EPSG:28356', 'EPSG:4283')])
# should be no coordinate operation
self.assertEqual(transform.coordinateOperation(), '')
# should default to allowing fallback transforms
self.assertTrue(transform.allowFallbackTransforms())
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:4283'), context)
self.assertTrue(transform.allowFallbackTransforms())
context.addCoordinateOperation(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:4283'),
'proj', False)
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:4283'), context)
self.assertFalse(transform.allowFallbackTransforms())
# matching source
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28356'), QgsCoordinateReferenceSystem('EPSG:28353'), context)
self.assertEqual(transform.coordinateOperation(), '')
# matching dest
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28354'),
QgsCoordinateReferenceSystem('EPSG:4283'), context)
self.assertEqual(transform.coordinateOperation(), '')
# matching src/dest pair
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:4283'), context)
self.assertEqual(transform.coordinateOperation(), 'proj')
# test manual overwriting
transform.setCoordinateOperation('proj2')
self.assertEqual(transform.coordinateOperation(), 'proj2')
transform.setAllowFallbackTransforms(False)
self.assertFalse(transform.allowFallbackTransforms())
transform.setAllowFallbackTransforms(True)
self.assertTrue(transform.allowFallbackTransforms())
# test that auto operation setting occurs when updating src/dest crs
transform.setSourceCrs(QgsCoordinateReferenceSystem('EPSG:28356'))
self.assertEqual(transform.coordinateOperation(), 'proj')
transform.setCoordinateOperation('proj2')
transform.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4283'))
self.assertEqual(transform.coordinateOperation(), 'proj')
transform.setCoordinateOperation('proj2')
# delayed context set
transform = QgsCoordinateTransform()
self.assertEqual(transform.coordinateOperation(), '')
transform.setSourceCrs(QgsCoordinateReferenceSystem('EPSG:28356'))
transform.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4283'))
self.assertEqual(transform.coordinateOperation(), '')
transform.setContext(context)
self.assertEqual(transform.coordinateOperation(), 'proj')
self.assertEqual(list(transform.context().coordinateOperations().keys()), [('EPSG:28356', 'EPSG:4283')])
def testProjectContextProj6(self):
"""
Test creating transform using convenience constructor which takes project reference
"""
p = QgsProject()
context = p.transformContext()
context.addCoordinateOperation(QgsCoordinateReferenceSystem('EPSG:28356'),
QgsCoordinateReferenceSystem('EPSG:3111'), 'proj')
p.setTransformContext(context)
transform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:28356'), QgsCoordinateReferenceSystem('EPSG:3111'), p)
self.assertEqual(transform.coordinateOperation(), 'proj')
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
sclabs/sccms-nonrel
|
django/template/loaders/filesystem.py
|
229
|
2358
|
"""
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = settings.TEMPLATE_DIRS
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
file = open(filepath)
try:
return (file.read().decode(settings.FILE_CHARSET), filepath)
finally:
file.close()
except IOError:
tried.append(filepath)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg)
load_template_source.is_usable = True
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
# For backwards compatibility
import warnings
warnings.warn(
"'django.template.loaders.filesystem.load_template_source' is deprecated; use 'django.template.loaders.filesystem.Loader' instead.",
DeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
|
bsd-3-clause
|
zetaris/zeppelin
|
spark/interpreter/src/main/resources/python/zeppelin_ipyspark.py
|
9
|
2845
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os  # needed for the PY4J_GATEWAY_SECRET check below
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
# for back compatibility
from pyspark.sql import SQLContext
# start JVM gateway
if "PY4J_GATEWAY_SECRET" in os.environ:
from py4j.java_gateway import GatewayParameters
gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
gateway = JavaGateway(gateway_parameters=GatewayParameters(address="${JVM_GATEWAY_ADDRESS}",
port=${JVM_GATEWAY_PORT}, auth_token=gateway_secret, auto_convert=True))
else:
gateway = JavaGateway(GatewayClient(address="${JVM_GATEWAY_ADDRESS}", port=${JVM_GATEWAY_PORT}), auto_convert=True)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
jsc = intp.getJavaSparkContext()
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
jconf = jsc.getConf()
conf = SparkConf(_jvm=gateway.jvm, _jconf=jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
if intp.isSpark2():
from pyspark.sql import SparkSession
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlContext = sqlc = __zSqlc__ = __zSpark__._wrapped
else:
sqlContext = sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
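# Note (added comment): PyZeppelinContext is not imported in this template; it appears
# to be injected into the interpreter namespace by the Zeppelin Python runtime before
# this script runs.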
class IPySparkZeppelinContext(PyZeppelinContext):
def __init__(self, z, gateway):
super(IPySparkZeppelinContext, self).__init__(z, gateway)
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
super(IPySparkZeppelinContext, self).show(obj)
z = __zeppelin__ = IPySparkZeppelinContext(intp.getZeppelinContext(), gateway)
|
apache-2.0
|
bitmazk/django-frequently
|
frequently/tests/forms_tests.py
|
1
|
1290
|
"""Tests for the forms of the ``django-frequently`` app."""
from django.test import TestCase
from mixer.backend.django import mixer
from .. import forms
class EntryFormTestCase(TestCase):
"""Tests for the ``EntryForm`` form class."""
def setUp(self):
self.owner = mixer.blend('auth.User')
def test_form(self):
data = {
'question': ('This is a very long question to test the slug'
' generator and the truncation results. Sometimes'
' questions can become very very long, so we will'
' have to be careful to not create exceptions.'),
'submitted_by': 'info@example.com',
}
form = forms.EntryForm(data=data)
self.assertTrue(form.is_valid())
with self.settings(FREQUENTLY_REQUIRE_EMAIL=False):
form = forms.EntryForm(data=data, owner=self.owner)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.submitted_by, self.owner.email)
self.assertEqual(obj.slug, ('this-is-a-very-long-question-to-test-'
'the-slug-generator-and-the-truncation'
'-results-sometimes-questio'))
|
mit
|
herrnst/script.xbmc.lcdproc
|
resources/lib/lcdproc.py
|
1
|
21691
|
'''
XBMC LCDproc addon
Copyright (C) 2012-2018 Team Kodi
Copyright (C) 2012-2018 Daniel 'herrnst' Scheller
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import telnetlib
import time
import xbmc
from socket import *
from .settings import *
from .lcdbase import *
from .lcdproc_extra_imon import *
from .lcdproc_extra_mdm166a import *
from .infolabels import *
MAX_ROWS = 20
MAX_BIGDIGITS = 20
INIT_RETRY_INTERVAL = 2
INIT_RETRY_INTERVAL_MAX = 60
class LCDProc(LcdBase):
def __init__(self, settings):
self.m_bStop = True
self.m_lastInitAttempt = 0
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_used = True
self.tn = telnetlib.Telnet()
self.tnsocket = None
self.m_timeLastSockAction = time.time()
self.m_timeSocketIdleTimeout = 2
self.m_strLineText = [None]*MAX_ROWS
self.m_strLineType = [None]*MAX_ROWS
self.m_bstrLineIcon = [None]*MAX_ROWS
self.m_strDigits = [None]*MAX_BIGDIGITS
self.m_iProgressBarWidth = 0
self.m_iProgressBarLine = -1
self.m_bstrIconName = b"BLOCK_FILLED"
self.m_iBigDigits = int(8) # 12:45:78 / colons count as digit
self.m_iOffset = 1
self.m_bstrSetLineCmds = b""
self.m_cExtraIcons = None
LcdBase.__init__(self, settings)
def SendCommand(self, strCmd, bCheckRet):
countcmds = strCmd.count(b'\n')
sendcmd = strCmd
ret = True
# Single command without lf
if countcmds < 1:
countcmds = 1
sendcmd += b"\n"
try:
# Send to server via raw socket to prevent telnetlib tampering with
# certain chars (especially 0xFF -> telnet IAC)
self.tnsocket.sendall(sendcmd)
except:
# Something bad happened, abort
log(LOGERROR, "SendCommand: Telnet exception - send")
return False
# Update last socketaction timestamp
self.m_timeLastSockAction = time.time()
# Repeat for number of found commands
for i in range(1, (countcmds + 1)):
# Read in (multiple) responses
while True:
try:
# Read server reply
reply = self.tn.read_until(b"\n",3)
except:
# (Re)read failed, abort
log(LOGERROR, "SendCommand: Telnet exception - reread")
return False
# Skip these messages
if reply[:6] == b'listen':
continue
elif reply[:6] == b'ignore':
continue
elif reply[:3] == b'key':
continue
elif reply[:9] == b'menuevent':
continue
# Response seems interesting, so stop here
break
if not bCheckRet:
continue # no return checking desired, so be fine
if strCmd == b'noop' and reply == b'noop complete\n':
continue # noop has special reply
if reply == b'success\n':
continue
ret = False
# Leave information something undesired happened
if ret is False:
log(LOGWARNING, "Reply to '%s' was '%s'" % (strCmd.decode(self.m_strLCDEncoding), reply.decode(self.m_strLCDEncoding)))
return ret
def SetupScreen(self):
# Add screen first
if not self.SendCommand(b"screen_add xbmc", True):
return False
# Set screen priority
if not self.SendCommand(b"screen_set xbmc -priority info", True):
return False
# Turn off heartbeat if desired
if not self.m_Settings.getHeartBeat():
if not self.SendCommand(b"screen_set xbmc -heartbeat off", True):
return False
# Initialize command list var
strInitCommandList = b""
# Setup widgets (scrollers and hbars first)
for i in range(1,int(self.m_iRows)+1):
# Text widgets
strInitCommandList += b"widget_add xbmc lineScroller%i scroller\n" % (i)
# Progress bars
strInitCommandList += b"widget_add xbmc lineProgress%i hbar\n" % (i)
# Reset bars to zero
strInitCommandList += b"widget_set xbmc lineProgress%i 0 0 0\n" % (i)
self.m_strLineText[i-1] = ""
self.m_strLineType[i-1] = ""
# Setup icons last
for i in range(1,int(self.m_iRows)+1):
# Icons
strInitCommandList += b"widget_add xbmc lineIcon%i icon\n" % (i)
# Default icon
strInitCommandList += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (i)
self.m_bstrLineIcon[i-1] = b""
for i in range(1,int(self.m_iBigDigits + 1)):
# Big Digit
strInitCommandList += b"widget_add xbmc lineBigDigit%i num\n" % (i)
# Set Digit
strInitCommandList += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = b""
if not self.SendCommand(strInitCommandList, True):
return False
return True
def Initialize(self):
connected = False
if not self.m_used:
return False#nothing to do
#don't try to initialize too often
now = time.time()
if (now - self.m_lastInitAttempt) < self.m_initRetryInterval:
return False
self.m_lastInitAttempt = now
if self.Connect():
if LcdBase.Initialize(self):
# reset the retry interval after a successful connect
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_bStop = False
connected = True
else:
log(LOGERROR, "Connection successful but LCD.xml has errors, aborting connect")
if not connected:
# preventively close socket
self.CloseSocket()
# give up after INIT_RETRY_INTERVAL_MAX (60) seconds
if self.m_initRetryInterval > INIT_RETRY_INTERVAL_MAX:
self.m_used = False
log(LOGERROR,"Connect failed. Giving up. Please fix any connection problems and restart the addon.")
else:
self.m_initRetryInterval = self.m_initRetryInterval * 2
log(LOGERROR,"Connect failed. Retry in %d seconds." % self.m_initRetryInterval)
return connected
def DetermineExtraSupport(self):
rematch_imon = "SoundGraph iMON(.*)LCD"
rematch_mdm166a = "Targa(.*)mdm166a"
rematch_imonvfd = "Soundgraph(.*)VFD"
bUseExtraIcons = self.m_Settings.getUseExtraElements()
# Never cause script failure/interruption by this! This is totally optional!
try:
# Retrieve driver name for additional functionality
self.tn.write(b"info\n")
reply = self.tn.read_until(b"\n",3).strip().decode("ascii")
# When the LCDd driver doesn't supply a valid string, inform and return
if reply == "":
log(LOGINFO, "Empty driver information reply")
return
log(LOGINFO, "Driver information reply: " + reply)
if re.match(rematch_imon, reply):
log(LOGINFO, "SoundGraph iMON LCD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_imon()
# override bigdigits counter, the imonlcd driver handles bigdigits
# different: digits count for two columns instead of three
self.m_iBigDigits = 7
elif re.match(rematch_mdm166a, reply):
log(LOGINFO, "Futaba/Targa USB mdm166a VFD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_mdm166a()
elif re.match(rematch_imonvfd, reply):
log(LOGINFO, "SoundGraph iMON IR/VFD detected")
if self.m_cExtraIcons is not None:
self.m_cExtraIcons.Initialize()
except:
pass
def Connect(self):
self.CloseSocket()
try:
ip = self.m_Settings.getHostIp()
port = self.m_Settings.getHostPort()
log(LOGDEBUG,"Open " + str(ip) + ":" + str(port))
self.tn.open(ip, port)
# Start a new session
self.tn.write(b"hello\n")
# Receive LCDproc data to determine row and column information
reply = self.tn.read_until(b"\n",3).decode("ascii")
log(LOGDEBUG,"Reply: " + reply)
# parse reply by regex
lcdinfo = re.match("^connect .+ protocol ([0-9\.]+) lcd wid (\d+) hgt (\d+) cellwid (\d+) cellhgt (\d+)$", reply)
# if regex didn't match, LCDproc is incompatible or something's odd
if lcdinfo is None:
return False
# protocol version must currently either be 0.3 or 0.4
if float(lcdinfo.group(1)) not in [0.3, 0.4]:
log(LOGERROR, "Only LCDproc protocols 0.3 and 0.4 supported (got " + lcdinfo.group(1) +")")
return False
# set up class vars
self.m_iColumns = int(lcdinfo.group(2))
self.m_iRows = int(lcdinfo.group(3))
self.m_iCellWidth = int(lcdinfo.group(4))
self.m_iCellHeight = int(lcdinfo.group(5))
# tell users what's going on
log(LOGINFO, "Connected to LCDd at %s:%s, Protocol version %s - Geometry %sx%s characters (%sx%s pixels, %sx%s pixels per character)" % (str(ip), str(port), float(lcdinfo.group(1)), str(self.m_iColumns), str(self.m_iRows), str(self.m_iColumns * self.m_iCellWidth), str(self.m_iRows * self.m_iCellHeight), str(self.m_iCellWidth), str(self.m_iCellHeight)))
# Set up BigNum values based on display geometry
if self.m_iColumns < 13:
self.m_iBigDigits = 0 # No clock
elif self.m_iColumns < 17:
self.m_iBigDigits = 5 # HH:MM
elif self.m_iColumns < 20:
self.m_iBigDigits = 7 # H:MM:SS on play, HH:MM on clock
else:
self.m_iBigDigits = 8 # HH:MM:SS
# Check LCDproc if we can enable any extras or override values
# (might override e.g. m_iBigDigits!)
self.DetermineExtraSupport()
except:
log(LOGERROR,"Connect: Caught exception, aborting.")
return False
# retrieve raw socket object
self.tnsocket = self.tn.get_socket()
if self.tnsocket is None:
log(LOGERROR, "Retrieval of socket object failed!")
return False
if not self.SetupScreen():
log(LOGERROR, "Screen setup failed!")
return False
return True
def CloseSocket(self):
if self.tnsocket:
# no pyexceptions, please, we're disconnecting anyway
try:
# if we served extra elements, (try to) reset them
if self.m_cExtraIcons is not None:
if not self.SendCommand(self.m_cExtraIcons.GetClearAllCmd(), True):
log(LOGERROR, "CloseSocket(): Cannot clear extra icons")
# do gracefully disconnect (send directly as we won't get any response on this)
self.tn.write(b"bye\n")
# and close socket afterwards
self.tn.close()
except:
# exception caught on this, so what? :)
pass
# delete/cleanup extra support instance
del self.m_cExtraIcons
self.m_cExtraIcons = None
self.tnsocket = None
del self.tn
self.tn = telnetlib.Telnet()
def IsConnected(self):
if not self.tnsocket:
return False
# Ping only every SocketIdleTimeout seconds
if (self.m_timeLastSockAction + self.m_timeSocketIdleTimeout) > time.time():
return True
if not self.SendCommand(b"noop", True):
log(LOGERROR, "noop failed in IsConnected(), aborting!")
return False
return True
def SetBackLight(self, iLight):
if not self.tnsocket:
return
log(LOGDEBUG, "Switch Backlight to: " + str(iLight))
# Build command
if iLight == 0:
cmd = b"screen_set xbmc -backlight off\n"
elif iLight > 0:
cmd = b"screen_set xbmc -backlight on\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "SetBackLight(): Cannot change backlight state")
self.CloseSocket()
def SetContrast(self, iContrast):
#TODO: Not sure if you can control contrast from client
return
def Stop(self):
self.CloseSocket()
self.m_bStop = True
def Suspend(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to suspend screen
cmd = b"screen_set xbmc -priority hidden\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Suspend(): Cannot suspend")
self.CloseSocket()
def Resume(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to resume screen
cmd = b"screen_set xbmc -priority info\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Resume(): Cannot resume")
self.CloseSocket()
def GetColumns(self):
return int(self.m_iColumns)
def GetBigDigitTime(self, mode):
ret = ""
if self.m_InfoLabels.IsPlayerPlaying():
if not (mode == LCD_MODE.LCD_MODE_SCREENSAVER and self.m_InfoLabels.IsPlayerPaused()):
ret = self.m_InfoLabels.GetPlayerTime()[-self.m_iBigDigits:]
if ret == "": # no usable timestring, e.g. not playing anything
strSysTime = self.m_InfoLabels.GetSystemTime()
if self.m_iBigDigits >= 8: # return h:m:s
ret = strSysTime
elif self.m_iBigDigits >= 5: # return h:m when display too small
ret = strSysTime[:5]
return ret
def SetBigDigits(self, strTimeString, bForceUpdate):
iOffset = 1
iDigitCount = 1
iStringOffset = 0
strRealTimeString = ""
if strTimeString == "" or strTimeString == None:
return
iStringLength = int(len(strTimeString))
if self.m_bCenterBigDigits:
iColons = strTimeString.count(":")
iWidth = 3 * (iStringLength - iColons) + iColons
iOffset = 1 + max(self.m_iColumns - iWidth, 0) / 2
if iStringLength > self.m_iBigDigits:
iStringOffset = len(strTimeString) - self.m_iBigDigits
iOffset = 1;
if self.m_iOffset != iOffset:
# on offset change force redraw
bForceUpdate = True
self.m_iOffset = iOffset
for i in range(int(iStringOffset), int(iStringLength)):
if self.m_strDigits[iDigitCount] != strTimeString[i] or bForceUpdate:
self.m_strDigits[iDigitCount] = strTimeString[i]
if strTimeString[i] == ":":
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i 10\n" % (iDigitCount, iOffset)
elif strTimeString[i].isdigit():
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i %s\n" % (iDigitCount, iOffset, strTimeString[i].encode(self.m_strLCDEncoding))
else:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
if strTimeString[i] == ":":
iOffset += 1
else:
iOffset += 3
iDigitCount += 1
while iDigitCount <= self.m_iBigDigits:
if self.m_strDigits[iDigitCount] != "" or bForceUpdate:
self.m_strDigits[iDigitCount] = ""
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
iDigitCount += 1
def SetProgressBar(self, percent, pxWidth):
self.m_iProgressBarWidth = int(float(percent) * pxWidth)
return self.m_iProgressBarWidth
def SetPlayingStateIcon(self):
bPlaying = self.m_InfoLabels.IsPlayerPlaying()
bPaused = self.m_InfoLabels.IsPlayerPaused()
bForwarding = self.m_InfoLabels.IsPlayerForwarding()
bRewinding = self.m_InfoLabels.IsPlayerRewinding()
self.m_bstrIconName = b"STOP"
if bForwarding:
self.m_bstrIconName = b"FF"
elif bRewinding:
self.m_bstrIconName = b"FR"
elif bPaused:
self.m_bstrIconName = b"PAUSE"
elif bPlaying:
self.m_bstrIconName = b"PLAY"
def GetRows(self):
return int(self.m_iRows)
def ClearBigDigits(self, fullredraw = True):
for i in range(1,int(self.m_iBigDigits + 1)):
# Clear Digit
if fullredraw:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = ""
# on full redraw, make sure all widget get redrawn by resetting their type
if fullredraw:
for i in range(0, int(self.GetRows())):
self.m_strLineType[i] = ""
self.m_strLineText[i] = ""
self.m_bstrLineIcon[i] = b""
def ClearLine(self, iLine):
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i 0 0 0\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"\"\n" % (iLine, iLine, self.m_iColumns, iLine)
def SetLine(self, mode, iLine, strLine, dictDescriptor, bForce):
if self.m_bStop or not self.tnsocket:
return
if iLine < 0 or iLine >= int(self.m_iRows):
return
plTime = self.m_InfoLabels.GetPlayerTime()
plDuration = self.m_InfoLabels.GetPlayerDuration()
ln = iLine + 1
bExtraForce = False
drawLineText = False
if self.m_strLineType[iLine] != dictDescriptor['type']:
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearDisplay()
else:
if self.m_strLineType[iLine] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearBigDigits()
else:
self.ClearLine(int(iLine + 1))
self.m_strLineType[iLine] = dictDescriptor['type']
bExtraForce = True
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
strLineLong = self.GetBigDigitTime(mode)
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
strLineLong = plTime + self.m_bProgressbarBlank * (self.m_iColumns - len(plTime) - len(plDuration)) + plDuration
else:
strLineLong = strLine
strLineLong.strip()
iMaxLineLen = dictDescriptor['endx'] - (int(dictDescriptor['startx']) - 1)
iScrollSpeed = self.m_Settings.getScrollDelay()
bstrScrollMode = self.m_Settings.getLCDprocScrollMode().encode(self.m_strLCDEncoding)
if len(strLineLong) > iMaxLineLen: # if the string doesn't fit the display...
if iScrollSpeed != 0: # add separator when scrolling enabled
if bstrScrollMode == b"m": # and scrollmode is marquee
strLineLong += self.m_strScrollSeparator
else: # or cut off
strLineLong = strLineLong[:iMaxLineLen]
iScrollSpeed = 1
iStartX = dictDescriptor['startx']
# check if update is required
if strLineLong != self.m_strLineText[iLine] or bForce:
# bigscreen
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.SetBigDigits(strLineLong, bExtraForce)
# progressbar line
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS:
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX, ln, self.m_iProgressBarWidth)
# progressbar line with time
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
drawLineText = True
pLenFract = float(self.m_iColumns - int(len(plDuration) + len(plTime))) / self.m_iColumns
pTimeLen = int(self.m_iProgressBarWidth * pLenFract)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX + len(plTime), ln, pTimeLen)
# everything else (text, icontext)
else:
drawLineText = True
if len(strLineLong) < iMaxLineLen and dictDescriptor['align'] != LCD_LINEALIGN.LCD_LINEALIGN_LEFT:
iSpaces = iMaxLineLen - len(strLineLong)
if dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_RIGHT:
iStartX += iSpaces
elif dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_CENTER:
iStartX += int(iSpaces / 2)
if drawLineText:
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i %i %i %i %i %s %i \"%s\"\n" % (ln, iStartX, ln, self.m_iColumns, ln, bstrScrollMode, iScrollSpeed, re.escape(strLineLong.encode(self.m_strLCDEncoding, errors="replace")))
# cache contents
self.m_strLineText[iLine] = strLineLong
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_ICONTEXT:
if self.m_bstrLineIcon[iLine] != self.m_bstrIconName or bExtraForce:
self.m_bstrLineIcon[iLine] = self.m_bstrIconName
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 1 %i %s\n" % (ln, ln, self.m_bstrIconName)
def ClearDisplay(self):
log(LOGDEBUG, "Clearing display contents")
# clear line buffer first
self.FlushLines()
# set all widgets to empty stuff and/or offscreen
for i in range(1,int(self.m_iRows)+1):
self.ClearLine(i)
# add commands to clear big digits
self.ClearBigDigits()
# send to display
self.FlushLines()
def FlushLines(self):
if len(self.m_bstrSetLineCmds) > 0:
# Send complete command package
self.SendCommand(self.m_bstrSetLineCmds, False)
self.m_bstrSetLineCmds = b""
|
gpl-2.0
|
mrquim/repository.mrquim
|
script.module.livestreamer/lib/livestreamer/plugins/filmon_us.py
|
34
|
4358
|
import re
from livestreamer.compat import urlparse
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_json, parse_query
from livestreamer.stream import RTMPStream, HTTPStream
SWF_LIVE_URL = "https://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf"
SWF_VIDEO_URL = "http://www.filmon.us/application/themes/base/flash/MediaPlayer.swf"
_url_re = re.compile("http(s)?://(\w+\.)?filmon.us")
_live_export_re = re.compile(
"<iframe src=\"(https://www.filmon.com/channel/export[^\"]+)\""
)
_live_json_re = re.compile("var startupChannel = (.+);")
_replay_json_re = re.compile("var standByVideo = encodeURIComponent\('(.+)'\);")
_history_re = re.compile(
"helpers.common.flash.flashplayerinstall\({url:'([^']+)',"
)
_video_flashvars_re = re.compile(
"<embed width=\"486\" height=\"326\" flashvars=\"([^\"]+)\""
)
_live_schema = validate.Schema({
"streams": [{
"name": validate.text,
"quality": validate.text,
"url": validate.url(scheme="rtmp")
}]
})
_schema = validate.Schema(
validate.union({
"export_url": validate.all(
validate.transform(_live_export_re.search),
validate.any(
None,
validate.get(1),
)
),
"video_flashvars": validate.all(
validate.transform(_video_flashvars_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.transform(parse_query),
{
"_111pix_serverURL": validate.url(scheme="rtmp"),
"en_flash_providerName": validate.text
}
)
)
),
"history_video": validate.all(
validate.transform(_history_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.url(scheme="http")
)
)
),
"standby_video": validate.all(
validate.transform(_replay_json_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.transform(parse_json),
[{
"streamName": validate.url(scheme="http")
}]
)
)
)
})
)
class Filmon_us(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_live_stream(self, export_url):
res = http.get(export_url)
match = _live_json_re.search(res.text)
if not match:
return
json = parse_json(match.group(1), schema=_live_schema)
streams = {}
for stream in json["streams"]:
stream_name = stream["quality"]
parsed = urlparse(stream["url"])
stream = RTMPStream(self.session, {
"rtmp": stream["url"],
"app": "{0}?{1}".format(parsed.path[1:], parsed.query),
"playpath": stream["name"],
"swfVfy": SWF_LIVE_URL,
"pageUrl": self.url,
"live": True
})
streams[stream_name] = stream
return streams
def _get_streams(self):
res = http.get(self.url, schema=_schema)
if res["export_url"]:
return self._get_live_stream(res["export_url"])
elif res["video_flashvars"]:
stream = RTMPStream(self.session, {
"rtmp": res["video_flashvars"]["_111pix_serverURL"],
"playpath": res["video_flashvars"]["en_flash_providerName"],
"swfVfy": SWF_VIDEO_URL,
"pageUrl": self.url
})
return dict(video=stream)
elif res["standby_video"]:
for stream in res["standby_video"]:
stream = HTTPStream(self.session, stream["streamName"])
return dict(replay=stream)
elif res["history_video"]:
stream = HTTPStream(self.session, res["history_video"])
return dict(history=stream)
return
__plugin__ = Filmon_us
|
gpl-2.0
|
maxalbert/ansible
|
v1/tests/TestFilters.py
|
107
|
5974
|
'''
Test bundled filters
'''
import os.path
import unittest, tempfile, shutil
from ansible import playbook, inventory, callbacks
import ansible.runner.filter_plugins.core
import ansible.runner.filter_plugins.mathstuff
INVENTORY = inventory.Inventory(['localhost'])
BOOK = '''
- hosts: localhost
vars:
var: { a: [1,2,3] }
tasks:
- template: src=%s dest=%s
'''
SRC = '''
-
{{ var|to_json }}
-
{{ var|to_nice_json }}
-
{{ var|to_yaml }}
-
{{ var|to_nice_yaml }}
'''
DEST = '''
-
{"a": [1, 2, 3]}
-
{
"a": [
1,
2,
3
]
}
-
a: [1, 2, 3]
-
a:
- 1
- 2
- 3
'''
class TestFilters(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(dir='/tmp')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def temp(self, name, data=''):
'''write a temporary file and return the name'''
name = self.tmpdir + '/' + name
with open(name, 'w') as f:
f.write(data)
return name
def test_bool_none(self):
a = ansible.runner.filter_plugins.core.bool(None)
assert a == None
def test_bool_true(self):
a = ansible.runner.filter_plugins.core.bool(True)
assert a == True
def test_bool_yes(self):
a = ansible.runner.filter_plugins.core.bool('Yes')
assert a == True
def test_bool_no(self):
a = ansible.runner.filter_plugins.core.bool('Foo')
assert a == False
def test_quotes(self):
a = ansible.runner.filter_plugins.core.quote('ls | wc -l')
assert a == "'ls | wc -l'"
def test_fileglob(self):
pathname = os.path.join(os.path.dirname(__file__), '*')
a = ansible.runner.filter_plugins.core.fileglob(pathname)
assert __file__ in a
def test_regex(self):
a = ansible.runner.filter_plugins.core.regex('ansible', 'ansible',
match_type='findall')
assert a == True
def test_match_case_sensitive(self):
a = ansible.runner.filter_plugins.core.match('ansible', 'ansible')
assert a == True
def test_match_case_insensitive(self):
a = ansible.runner.filter_plugins.core.match('ANSIBLE', 'ansible',
True)
assert a == True
def test_match_no_match(self):
a = ansible.runner.filter_plugins.core.match(' ansible', 'ansible')
assert a == False
def test_search_case_sensitive(self):
a = ansible.runner.filter_plugins.core.search(' ansible ', 'ansible')
assert a == True
def test_search_case_insensitive(self):
a = ansible.runner.filter_plugins.core.search(' ANSIBLE ', 'ansible',
True)
assert a == True
def test_regex_replace_case_sensitive(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^a.*i(.*)$',
'a\\1')
assert a == 'able'
def test_regex_replace_case_insensitive(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^A.*I(.*)$',
'a\\1', True)
assert a == 'able'
def test_regex_replace_no_match(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^b.*i(.*)$',
'a\\1')
assert a == 'ansible'
def test_to_uuid(self):
a = ansible.runner.filter_plugins.core.to_uuid('example.com')
assert a == 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'
#def test_filters(self):
# this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
#return
#src = self.temp('src.j2', SRC)
#dest = self.temp('dest.txt')
#book = self.temp('book', BOOK % (src, dest))
#playbook.PlayBook(
# playbook = book,
# inventory = INVENTORY,
# transport = 'local',
# callbacks = callbacks.PlaybookCallbacks(),
# runner_callbacks = callbacks.DefaultRunnerCallbacks(),
# stats = callbacks.AggregateStats(),
#).run()
#out = open(dest).read()
#self.assertEqual(DEST, out)
def test_version_compare(self):
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(0, 1.1, 'lt', False))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.2, '<'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '=='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, 'eq'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'gt'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '>'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'ne'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '!='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '<>'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'ge'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.1, '>='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'le'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.0, 1.1, '<='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare('12.04', 12, 'ge'))
def test_min(self):
a = ansible.runner.filter_plugins.mathstuff.min([3, 2, 5, 4])
assert a == 2
def test_max(self):
a = ansible.runner.filter_plugins.mathstuff.max([3, 2, 5, 4])
assert a == 5
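# Illustrative usage sketch (not part of the original test suite): the filter
# plugins exercised above are plain Python callables, so they can also be
# called directly; expected values below simply restate the asserts above.
#
#   from ansible.runner.filter_plugins import core, mathstuff
#   core.bool('Yes')             # -> True, as in test_bool_yes
#   core.quote('ls | wc -l')     # -> "'ls | wc -l'", as in test_quotes
#   mathstuff.min([3, 2, 5, 4])  # -> 2, as in test_min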
|
gpl-3.0
|
Datera/cinder
|
cinder/policies/groups.py
|
3
|
2089
|
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from cinder.policies import base
CREATE_POLICY = 'group:create'
UPDATE_POLICY = 'group:update'
GET_POLICY = 'group:get'
GET_ALL_POLICY = 'group:get_all'
groups_policies = [
policy.DocumentedRuleDefault(
name=GET_ALL_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="List groups.",
operations=[
{
'method': 'GET',
'path': '/groups'
},
{
'method': 'GET',
'path': '/groups/detail'
}
]),
policy.DocumentedRuleDefault(
name=CREATE_POLICY,
check_str="",
description="Create group.",
operations=[
{
'method': 'POST',
'path': '/groups'
}
]),
policy.DocumentedRuleDefault(
name=GET_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Show group.",
operations=[
{
'method': 'GET',
'path': '/groups/{group_id}'
}
]),
policy.DocumentedRuleDefault(
name=UPDATE_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Update group.",
operations=[
{
'method': 'PUT',
'path': '/groups/{group_id}'
}
]),
]
def list_rules():
return groups_policies
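# Usage sketch (illustrative, not from this module): with plain oslo.policy the
# defaults returned by list_rules() would typically be registered on an
# enforcer, roughly as below. `conf` stands for an oslo.config configuration
# object and is an assumption of this sketch.
#
#   enforcer = policy.Enforcer(conf)
#   enforcer.register_defaults(list_rules())
#   enforcer.authorize(GET_POLICY, target, creds)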
|
apache-2.0
|
deepmind/deep-verify
|
deep_verify/src/auto_verifier.py
|
1
|
9808
|
# coding=utf-8
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic construction of verifiable layers from a Sonnet module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deep_verify.src.layers import layers
import interval_bound_propagation as ibp
import sonnet as snt
class NotVerifiableError(Exception):
"""Module's graph contains features that do not map to verification layers."""
class VerifiableLayerBuilder(object):
"""Constructs verifiable layers from a Sonnet module."""
def __init__(self, network):
"""Constructor.
Args:
network: `NetworkBuilder` containing network with propagated bounds.
"""
super(VerifiableLayerBuilder, self).__init__()
self._network = network
def build_layers(self):
"""Builds the verifiable layers.
Returns:
List of `SingleVerifiableLayer` for the module.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(self._network.output_module))
if (not isinstance(backstop_node, ibp.ModelInputWrapper) or
self._network.fanout_of(backstop_node) != known_fanout):
raise NotVerifiableError('Invalid connectivity')
if reshape:
raise NotVerifiableError('Cannot end with a reshape operation')
return self._fuse(verifiable_layers)
def _build_layers_rec(self, node, known_fanout=1, batchnorm_node=None):
"""Builds verifiable layers leading up to the given layer output.
The list is constructed by navigating the layers in reverse order,
stopping either when the module's original inputs are reached,
    or (within a ResNet block) when a layer is encountered that has
outputs not processed by this navigation.
Args:
node: Layer output, up to which to build verifiable layers.
      known_fanout: Number of immediate outputs of `node` that have
already been processed by the caller.
This is typically 1, but sub-classes may invoke with 2 (or possibly
greater) where the network contains branches.
batchnorm_node: The BatchNorm's ConnectedSubgraph object if
        `node` is the input to a BatchNorm layer, otherwise None.
Returns:
backstop_node: Node, typically the `ibp.ModelInputWrapper`, at which we
stopped backtracking.
      known_fanout: Number of immediate outputs of `backstop_node` that were
processed in this call.
This is typically 1, but overrides may return 2 (or possibly greater)
in the presence of branched architectures.
verifiable_layers: List of `SingleVerifiableLayer` whose final element's
output is `outputs`.
reshape: Whether the final element of `verifiable_layers` is followed by
a reshape operation.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
if (isinstance(node, ibp.ModelInputWrapper) or
self._network.fanout_of(node) != known_fanout):
# Reached the inputs (or start of the enclosing ResNet block).
# No more layers to construct.
if batchnorm_node:
raise NotVerifiableError('Cannot begin with batchnorm')
return node, known_fanout, [], False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'identity'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=batchnorm_node)
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'avg_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_mean'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'max_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_max'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif isinstance(node.module, snt.BatchNorm):
# Construct the previous layer with batchnorm.
if batchnorm_node:
raise NotVerifiableError('Cannot have consecutive batchnorms')
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=node)
elif isinstance(node.module, snt.BatchReshape):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
if batchnorm_node:
raise NotVerifiableError('Reshape cannot have batchnorm')
return backstop_node, known_fanout, verifiable_layers, True
else:
# Recursively build all preceding layers.
input_nodes = self._network.dependencies(node)
if len(input_nodes) != 1:
raise NotVerifiableError('Unary operation expected')
input_node, = input_nodes
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the layer.
verifiable_layers.append(layers.create_verifiable_layer(
input_node,
batchnorm_node or node,
node.module,
batch_norm=(batchnorm_node.module if batchnorm_node else None),
reshape=reshape,
parameters=(node.parameters
if isinstance(node, ibp.IncreasingMonotonicWrapper)
else None),
))
return backstop_node, known_fanout, verifiable_layers, False
def _fuse(self, verifiable_layers):
"""Performs fusion of certain layer pairs."""
fused_layers = []
idx = 0
while idx < len(verifiable_layers):
if (idx+2 <= len(verifiable_layers) and
isinstance(verifiable_layers[idx], layers.MaxPool) and
isinstance(verifiable_layers[idx+1], layers.Activation) and
verifiable_layers[idx+1].activation == 'relu'):
# Fuse maxpool with relu.
original = verifiable_layers[idx]
fused_layers.append(layers.MaxPool(original.input_node,
original.output_node,
kernel_shape=original.kernel_shape,
strides=original.strides,
with_relu=True,
reshape=original.reshape))
idx += 2
else:
fused_layers.append(verifiable_layers[idx])
idx += 1
return fused_layers
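# Minimal usage sketch (illustrative, not part of the original module). `network`
# stands for the NetworkBuilder instance mentioned in the constructor docstring,
# with bounds already propagated through the graph:
#
#   builder = VerifiableLayerBuilder(network)
#   verifiable_layers = builder.build_layers()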
|
apache-2.0
|
SafeW3rd/Ciphers
|
simpleSubHacker.py
|
1
|
7029
|
# Simple Substitution Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import os, re, copy, pprint, pyperclip, simpleSubCipher, makeWordPatterns
if not os.path.exists('wordPatterns.py'):
makeWordPatterns.main() # create the wordPatterns.py file
import wordPatterns
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
nonLettersOrSpacePattern = re.compile('[^A-Z\s]')
def main():
message = 'Sy l nlx sr pyyacao l ylwj eiswi upar lulsxrj isr sxrjsxwjr, ia esmm rwctjsxsza sj wmpramh, lxo txmarr jia aqsoaxwa sr pqaceiamnsxu, ia esmm caytra jp famsaqa sj. Sy, px jia pjiac ilxo, ia sr pyyacao rpnajisxu eiswi lyypcor l calrpx ypc lwjsxu sx lwwpcolxwa jp isr sxrjsxwjr, ia esmm lwwabj sj aqax px jia rmsuijarj aqsoaxwa. Jia pcsusx py nhjir sr agbmlsxao sx jisr elh. -Facjclxo Ctrramm'
# Determine the possible valid ciphertext translations.
print('Hacking...')
letterMapping = hackSimpleSub(message)
# Display the results to the user.
print('Mapping:')
pprint.pprint(letterMapping)
print()
print('Original ciphertext:')
print(message)
print()
print('Copying hacked message to clipboard:')
hackedMessage = decryptWithCipherletterMapping(message, letterMapping)
pyperclip.copy(hackedMessage)
print(hackedMessage)
def getBlankCipherletterMapping():
# Returns a dictionary value that is a blank cipherletter mapping.
return {'A': [], 'B': [], 'C': [], 'D': [], 'E': [], 'F': [], 'G': [], 'H': [], 'I': [], 'J': [], 'K': [], 'L': [], 'M': [], 'N': [], 'O': [], 'P': [], 'Q': [], 'R': [], 'S': [], 'T': [], 'U': [], 'V': [], 'W': [], 'X': [], 'Y': [], 'Z': []}
def addLettersToMapping(letterMapping, cipherword, candidate):
# The letterMapping parameter is a "cipherletter mapping" dictionary
# value that the return value of this function starts as a copy of.
# The cipherword parameter is a string value of the ciphertext word.
# The candidate parameter is a possible English word that the
# cipherword could decrypt to.
# This function adds the letters of the candidate as potential
# decryption letters for the cipherletters in the cipherletter
# mapping.
letterMapping = copy.deepcopy(letterMapping)
for i in range(len(cipherword)):
if candidate[i] not in letterMapping[cipherword[i]]:
letterMapping[cipherword[i]].append(candidate[i])
return letterMapping
def intersectMappings(mapA, mapB):
# To intersect two maps, create a blank map, and then add only the
# potential decryption letters if they exist in BOTH maps.
intersectedMapping = getBlankCipherletterMapping()
for letter in LETTERS:
# An empty list means "any letter is possible". In this case just
# copy the other map entirely.
if mapA[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapB[letter])
elif mapB[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapA[letter])
else:
# If a letter in mapA[letter] exists in mapB[letter], add
# that letter to intersectedMapping[letter].
for mappedLetter in mapA[letter]:
if mappedLetter in mapB[letter]:
intersectedMapping[letter].append(mappedLetter)
return intersectedMapping
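# Worked example (illustrative): if mapA['S'] == ['I', 'O'] and mapB['S'] == ['O', 'U'],
# the intersected entry for 'S' is ['O']. If either entry is an empty list
# ("any letter is possible"), the other entry is copied over unchanged.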
def removeSolvedLettersFromMapping(letterMapping):
# Cipher letters in the mapping that map to only one letter are
# "solved" and can be removed from the other letters.
# For example, if 'A' maps to potential letters ['M', 'N'], and 'B'
# maps to ['N'], then we know that 'B' must map to 'N', so we can
# remove 'N' from the list of what 'A' could map to. So 'A' then maps
# to ['M']. Note that now that 'A' maps to only one letter, we can
# remove 'M' from the list of letters for every other
# letter. (This is why there is a loop that keeps reducing the map.)
letterMapping = copy.deepcopy(letterMapping)
loopAgain = True
while loopAgain:
# First assume that we will not loop again:
loopAgain = False
# solvedLetters will be a list of uppercase letters that have one
# and only one possible mapping in letterMapping
solvedLetters = []
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
solvedLetters.append(letterMapping[cipherletter][0])
        # If a letter is solved, then it cannot possibly be a potential
# decryption letter for a different ciphertext letter, so we
# should remove it from those other lists.
for cipherletter in LETTERS:
for s in solvedLetters:
if len(letterMapping[cipherletter]) != 1 and s in letterMapping[cipherletter]:
letterMapping[cipherletter].remove(s)
if len(letterMapping[cipherletter]) == 1:
# A new letter is now solved, so loop again.
loopAgain = True
return letterMapping
def hackSimpleSub(message):
intersectedMap = getBlankCipherletterMapping()
cipherwordList = nonLettersOrSpacePattern.sub('', message.upper()).split()
for cipherword in cipherwordList:
# Get a new cipherletter mapping for each ciphertext word.
newMap = getBlankCipherletterMapping()
wordPattern = makeWordPatterns.getWordPattern(cipherword)
if wordPattern not in wordPatterns.allPatterns:
continue # This word was not in our dictionary, so continue.
# Add the letters of each candidate to the mapping.
for candidate in wordPatterns.allPatterns[wordPattern]:
newMap = addLettersToMapping(newMap, cipherword, candidate)
# Intersect the new mapping with the existing intersected mapping.
intersectedMap = intersectMappings(intersectedMap, newMap)
# Remove any solved letters from the other lists.
return removeSolvedLettersFromMapping(intersectedMap)
def decryptWithCipherletterMapping(ciphertext, letterMapping):
# Return a string of the ciphertext decrypted with the letter mapping,
# with any ambiguous decrypted letters replaced with an _ underscore.
# First create a simple sub key from the letterMapping mapping.
key = ['x'] * len(LETTERS)
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
# If there's only one letter, add it to the key.
keyIndex = LETTERS.find(letterMapping[cipherletter][0])
key[keyIndex] = cipherletter
else:
ciphertext = ciphertext.replace(cipherletter.lower(), '_')
ciphertext = ciphertext.replace(cipherletter.upper(), '_')
key = ''.join(key)
# With the key we've created, decrypt the ciphertext.
return simpleSubCipher.decryptMessage(key, ciphertext)
if __name__ == '__main__':
main()
|
mit
|
max-arnold/django-cities-light
|
setup.py
|
3
|
1594
|
from setuptools import setup, find_packages
import shutil
import sys
import os
import os.path
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-cities-light',
version='3.3.0',
description='Simple alternative to django-cities',
author='James Pic,Dominick Rivard,Alexey Evseev',
author_email='jamespic@gmail.com, dominick.rivard@gmail.com, myhappydo@gmail.com',
url='https://github.com/yourlabs/django-cities-light',
packages=['cities_light'],
include_package_data=True,
zip_safe=False,
long_description=read('README.rst'),
license='MIT',
keywords='django cities countries postal codes',
install_requires=[
'six',
'unidecode>=0.04.13',
'django_autoslug',
'progressbar2>=3.6.0'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
mit
|
fle-internal/content-curation
|
contentcuration/contentcuration/management/commands/exportchannel.py
|
1
|
27865
|
import collections
import itertools
import json
import logging as logmodule
import os
import re
import sys
import tempfile
import uuid
import zipfile
from itertools import chain
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Count
from django.db.models import Q
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from kolibri.content.utils.search import fuzz
from kolibri_content import models as kolibrimodels
from kolibri_content.router import get_active_content_database
from kolibri_content.router import using_content_database
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import roles
from contentcuration import models as ccmodels
from contentcuration.statistics import record_publish_stats
from contentcuration.utils.files import create_thumbnail_from_base64
from contentcuration.utils.files import get_thumbnail_encoding
from contentcuration.utils.parser import extract_value
from contentcuration.utils.parser import load_json_string
logmodule.basicConfig()
logging = logmodule.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf8')
PERSEUS_IMG_DIR = exercises.IMG_PLACEHOLDER + "/images"
THUMBNAIL_DIMENSION = 128
MIN_SCHEMA_VERSION = "1"
class EarlyExit(BaseException):
def __init__(self, message, db_path):
self.message = message
self.db_path = db_path
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('channel_id', type=str)
parser.add_argument('--force', action='store_true', dest='force', default=False)
parser.add_argument('--user_id', dest='user_id', default=None)
parser.add_argument('--force-exercises', action='store_true', dest='force-exercises', default=False)
# optional argument to send an email to the user when done with exporting channel
parser.add_argument('--email', action='store_true', default=False)
def handle(self, *args, **options):
# license_id = options['license_id']
channel_id = options['channel_id']
force = options['force']
send_email = options['email']
user_id = options['user_id']
force_exercises = options['force-exercises']
channel = ccmodels.Channel.objects.get(pk=channel_id)
# license = ccmodels.License.objects.get(pk=license_id)
try:
create_content_database(channel_id, force, user_id, force_exercises)
increment_channel_version(channel)
mark_all_nodes_as_changed(channel)
add_tokens_to_channel(channel)
fill_published_fields(channel)
# Attributes not getting set for some reason, so just save it here
channel.main_tree.publishing = False
channel.main_tree.changed = False
channel.main_tree.published = True
channel.main_tree.save()
if send_email:
send_emails(channel, user_id)
# use SQLite backup API to put DB into archives folder.
# Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)
record_publish_stats(channel)
except EarlyExit as e:
logging.warning("Exited early due to {message}.".format(message=e.message))
self.stdout.write("You can find your database in {path}".format(path=e.db_path))
# No matter what, make sure publishing is set to False once the run is done
finally:
channel.main_tree.publishing = False
channel.main_tree.save()
def send_emails(channel, user_id):
subject = render_to_string('registration/custom_email_subject.txt', {'subject': _('Kolibri Studio Channel Published')})
if user_id:
user = ccmodels.User.objects.get(pk=user_id)
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
else:
# Email all users about updates to channel
for user in itertools.chain(channel.editors.all(), channel.viewers.all()):
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
def create_content_database(channel_id, force, user_id, force_exercises):
channel = ccmodels.Channel.objects.get(pk=channel_id)
# increment the channel version
if not force:
raise_if_nodes_are_all_unchanged(channel)
fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
with using_content_database(tempdb):
channel.main_tree.publishing = True
channel.main_tree.save()
prepare_export_database(tempdb)
map_channel_to_kolibri_channel(channel)
map_content_nodes(channel.main_tree, channel.language, channel.id, channel.name, user_id=user_id, force_exercises=force_exercises)
map_prerequisites(channel.main_tree)
save_export_database(channel_id)
def create_kolibri_license_object(ccnode):
use_license_description = not ccnode.license.is_custom
return kolibrimodels.License.objects.get_or_create(
license_name=ccnode.license.license_name,
license_description=ccnode.license.license_description if use_license_description else ccnode.license_description
)
def increment_channel_version(channel):
channel.version += 1
channel.last_published = timezone.now()
channel.save()
def assign_license_to_contentcuration_nodes(channel, license):
channel.main_tree.get_family().update(license_id=license.pk)
def map_content_nodes(root_node, default_language, channel_id, channel_name, user_id=None, force_exercises=False):
# make sure we process nodes higher up in the tree first, or else when we
# make mappings the parent nodes might not be there
node_queue = collections.deque()
node_queue.append(root_node)
def queue_get_return_none_when_empty():
try:
return node_queue.popleft()
except IndexError:
return None
# kolibri_license = kolibrimodels.License.objects.get(license_name=license.license_name)
with transaction.atomic():
with ccmodels.ContentNode.objects.delay_mptt_updates():
for node in iter(queue_get_return_none_when_empty, None):
logging.debug("Mapping node with id {id}".format(
id=node.pk))
if node.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists():
children = (node.children.all())
node_queue.extend(children)
kolibrinode = create_bare_contentnode(node, default_language, channel_id, channel_name)
if node.kind.kind == content_kinds.EXERCISE:
exercise_data = process_assessment_metadata(node, kolibrinode)
if force_exercises or node.changed or not node.files.filter(preset_id=format_presets.EXERCISE).exists():
create_perseus_exercise(node, kolibrinode, exercise_data, user_id=user_id)
create_associated_file_objects(kolibrinode, node)
map_tags_to_node(kolibrinode, node)
def create_bare_contentnode(ccnode, default_language, channel_id, channel_name):
logging.debug("Creating a Kolibri contentnode for instance id {}".format(
ccnode.node_id))
kolibri_license = None
if ccnode.license is not None:
kolibri_license = create_kolibri_license_object(ccnode)[0]
language = None
if ccnode.language or default_language:
language, _new = get_or_create_language(ccnode.language or default_language)
kolibrinode, is_new = kolibrimodels.ContentNode.objects.update_or_create(
pk=ccnode.node_id,
defaults={
'kind': ccnode.kind.kind,
'title': ccnode.title if ccnode.parent else channel_name,
'content_id': ccnode.content_id,
'channel_id': channel_id,
'author': ccnode.author or "",
'description': ccnode.description,
'sort_order': ccnode.sort_order,
'license_owner': ccnode.copyright_holder or "",
'license': kolibri_license,
'available': ccnode.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists(), # Hide empty topics
'stemmed_metaphone': ' '.join(fuzz(ccnode.title + ' ' + ccnode.description)),
'lang': language,
'license_name': kolibri_license.license_name if kolibri_license is not None else None,
'license_description': kolibri_license.license_description if kolibri_license is not None else None,
'coach_content': ccnode.role_visibility == roles.COACH,
}
)
if ccnode.parent:
logging.debug("Associating {child} with parent {parent}".format(
child=kolibrinode.pk,
parent=ccnode.parent.node_id
))
kolibrinode.parent = kolibrimodels.ContentNode.objects.get(pk=ccnode.parent.node_id)
kolibrinode.save()
logging.debug("Created Kolibri ContentNode with node id {}".format(ccnode.node_id))
logging.debug("Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count()))
return kolibrinode
def get_or_create_language(language):
return kolibrimodels.Language.objects.get_or_create(
id=language.pk,
lang_code=language.lang_code,
lang_subcode=language.lang_subcode,
lang_name=language.lang_name if hasattr(language, 'lang_name') else language.native_name,
lang_direction=language.lang_direction
)
def create_associated_thumbnail(ccnode, ccfilemodel):
"""
Gets the appropriate thumbnail for export (uses or generates a base64 encoding)
Args:
ccnode (<ContentNode>): node to derive thumbnail from (if encoding is provided)
ccfilemodel (<File>): file to get thumbnail from if no encoding is available
Returns <File> model of encoded, resized thumbnail
"""
encoding = None
try:
encoding = ccnode.thumbnail_encoding and load_json_string(ccnode.thumbnail_encoding).get('base64')
except ValueError:
logging.error("ERROR: node thumbnail is not in correct format ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
# Save the encoding if it doesn't already have an encoding
if not encoding:
try:
encoding = get_thumbnail_encoding(str(ccfilemodel))
except IOError:
# ImageMagick may raise an IOError if the file is not a thumbnail. Catch that then just return early.
logging.error("ERROR: cannot identify the thumbnail ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
ccnode.thumbnail_encoding = json.dumps({
"base64": encoding,
"points": [],
"zoom": 0,
})
ccnode.save()
return create_thumbnail_from_base64(
encoding,
uploaded_by=ccfilemodel.uploaded_by,
file_format_id=ccfilemodel.file_format_id,
preset_id=ccfilemodel.preset_id
)
def create_associated_file_objects(kolibrinode, ccnode):
logging.debug("Creating LocalFile and File objects for Node {}".format(kolibrinode.id))
for ccfilemodel in ccnode.files.exclude(Q(preset_id=format_presets.EXERCISE_IMAGE) | Q(preset_id=format_presets.EXERCISE_GRAPHIE)):
preset = ccfilemodel.preset
fformat = ccfilemodel.file_format
if ccfilemodel.language:
get_or_create_language(ccfilemodel.language)
if preset.thumbnail:
ccfilemodel = create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel
kolibrilocalfilemodel, new = kolibrimodels.LocalFile.objects.get_or_create(
pk=ccfilemodel.checksum,
defaults={
'extension': fformat.extension,
'file_size': ccfilemodel.file_size,
}
)
kolibrimodels.File.objects.create(
pk=ccfilemodel.pk,
checksum=ccfilemodel.checksum,
extension=fformat.extension,
available=True, # TODO: Set this to False, once we have availability stamping implemented in Kolibri
file_size=ccfilemodel.file_size,
contentnode=kolibrinode,
preset=preset.pk,
supplementary=preset.supplementary,
lang_id=ccfilemodel.language and ccfilemodel.language.pk,
thumbnail=preset.thumbnail,
priority=preset.order,
local_file=kolibrilocalfilemodel,
)
def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None):
logging.debug("Creating Perseus Exercise for Node {}".format(ccnode.title))
filename = "{0}.{ext}".format(ccnode.title, ext=file_formats.PERSEUS)
temppath = None
try:
with tempfile.NamedTemporaryFile(suffix="zip", delete=False) as tempf:
temppath = tempf.name
create_perseus_zip(ccnode, exercise_data, tempf)
file_size = tempf.tell()
tempf.flush()
ccnode.files.filter(preset_id=format_presets.EXERCISE).delete()
assessment_file_obj = ccmodels.File.objects.create(
file_on_disk=File(open(temppath, 'r'), name=filename),
contentnode=ccnode,
file_format_id=file_formats.PERSEUS,
preset_id=format_presets.EXERCISE,
original_filename=filename,
file_size=file_size,
uploaded_by_id=user_id,
)
logging.debug("Created exercise for {0} with checksum {1}".format(ccnode.title, assessment_file_obj.checksum))
finally:
temppath and os.unlink(temppath)
def process_assessment_metadata(ccnode, kolibrinode):
# Get mastery model information, set to default if none provided
assessment_items = ccnode.assessment_items.all().order_by('order')
exercise_data = json.loads(ccnode.extra_fields) if ccnode.extra_fields else {}
randomize = exercise_data.get('randomize') if exercise_data.get('randomize') is not None else True
assessment_item_ids = [a.assessment_id for a in assessment_items]
mastery_model = {'type': exercise_data.get('mastery_model') or exercises.M_OF_N}
if mastery_model['type'] == exercises.M_OF_N:
mastery_model.update({'n': exercise_data.get('n') or min(5, assessment_items.count()) or 1})
mastery_model.update({'m': exercise_data.get('m') or min(5, assessment_items.count()) or 1})
elif mastery_model['type'] == exercises.DO_ALL:
mastery_model.update({'n': assessment_items.count() or 1, 'm': assessment_items.count() or 1})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_2:
mastery_model.update({'n': 2, 'm': 2})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_3:
mastery_model.update({'n': 3, 'm': 3})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_5:
mastery_model.update({'n': 5, 'm': 5})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_10:
mastery_model.update({'n': 10, 'm': 10})
exercise_data.update({
'mastery_model': exercises.M_OF_N,
'legacy_mastery_model': mastery_model['type'],
'randomize': randomize,
'n': mastery_model.get('n'),
'm': mastery_model.get('m'),
'all_assessment_items': assessment_item_ids,
'assessment_mapping': {a.assessment_id: a.type if a.type != 'true_false' else exercises.SINGLE_SELECTION.decode('utf-8') for a in assessment_items},
})
kolibrimodels.AssessmentMetaData.objects.create(
id=uuid.uuid4(),
contentnode=kolibrinode,
assessment_item_ids=json.dumps(assessment_item_ids),
number_of_assessments=assessment_items.count(),
mastery_model=json.dumps(mastery_model),
randomize=randomize,
is_manipulable=ccnode.kind_id == content_kinds.EXERCISE,
)
return exercise_data
def create_perseus_zip(ccnode, exercise_data, write_to_path):
with zipfile.ZipFile(write_to_path, "w") as zf:
try:
exercise_context = {
'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
}
exercise_result = render_to_string('perseus/exercise.json', exercise_context)
write_to_zipfile("exercise.json", exercise_result, zf)
for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'):
try:
for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'):
image_name = "images/{}.{}".format(image.checksum, image.file_format_id)
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
write_to_zipfile(image_name, content.read(), zf)
for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'):
svg_name = "images/{0}.svg".format(image.original_filename)
json_name = "images/{0}-data.json".format(image.original_filename)
if svg_name not in zf.namelist() or json_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
content = content.read()
content = content.split(exercises.GRAPHIE_DELIMITER)
write_to_zipfile(svg_name, content[0], zf)
write_to_zipfile(json_name, content[1], zf)
write_assessment_item(question, zf)
except Exception as e:
logging.error("Publishing error: {}".format(str(e)))
finally:
zf.close()
def write_to_zipfile(filename, content, zf):
info = zipfile.ZipInfo(filename, date_time=(2013, 3, 14, 1, 59, 26))
info.comment = "Perseus file generated during export process".encode()
info.compress_type = zipfile.ZIP_STORED
info.create_system = 0
zf.writestr(info, content)
def write_assessment_item(assessment_item, zf):
if assessment_item.type == exercises.MULTIPLE_SELECTION:
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.SINGLE_SELECTION or assessment_item.type == 'true_false':
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.INPUT_QUESTION:
template = 'perseus/input_question.json'
elif assessment_item.type == exercises.PERSEUS_QUESTION:
template = 'perseus/perseus_question.json'
else:
raise TypeError("Unrecognized question type on item {}".format(assessment_item.assessment_id))
question = process_formulas(assessment_item.question)
question, question_images = process_image_strings(question, zf)
answer_data = json.loads(assessment_item.answers)
for answer in answer_data:
if assessment_item.type == exercises.INPUT_QUESTION:
answer['answer'] = extract_value(answer['answer'])
else:
answer['answer'] = answer['answer'].replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
answer['answer'] = process_formulas(answer['answer'])
            # In case Perseus doesn't support the =wxh syntax, use the code below
answer['answer'], answer_images = process_image_strings(answer['answer'], zf)
answer.update({'images': answer_images})
answer_data = list(filter(lambda a: a['answer'] or a['answer'] == 0, answer_data)) # Filter out empty answers, but not 0
hint_data = json.loads(assessment_item.hints)
for hint in hint_data:
hint['hint'] = process_formulas(hint['hint'])
hint['hint'], hint_images = process_image_strings(hint['hint'], zf)
hint.update({'images': hint_images})
context = {
'question': question,
'question_images': question_images,
'answers': sorted(answer_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'multiple_select': assessment_item.type == exercises.MULTIPLE_SELECTION,
'raw_data': assessment_item.raw_data.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR),
'hints': sorted(hint_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'randomize': assessment_item.randomize,
}
result = render_to_string(template, context).encode('utf-8', "ignore")
write_to_zipfile("{0}.json".format(assessment_item.assessment_id), result, zf)
def process_formulas(content):
for match in re.finditer(ur'\$(\$.+\$)\$', content):
content = content.replace(match.group(0), match.group(1))
return content
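# Illustrative example (not in the original source): process_formulas('$$x^2$$')
# returns '$x^2$'. The regex above matches each outer '$$...$$' pair and keeps
# only the inner single-dollar form.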
def process_image_strings(content, zf):
image_list = []
content = content.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
for match in re.finditer(ur'!\[(?:[^\]]*)]\(([^\)]+)\)', content):
img_match = re.search(ur'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', match.group(1))
if img_match:
# Add any image files that haven't been written to the zipfile
filename = img_match.group(1).split('/')[-1]
checksum, ext = os.path.splitext(filename)
image_name = "images/{}.{}".format(checksum, ext[1:])
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile:
write_to_zipfile(image_name, imgfile.read(), zf)
# Add resizing data
if img_match.group(2) and img_match.group(3):
image_data = {'name': img_match.group(1)}
image_data.update({'width': float(img_match.group(2))})
image_data.update({'height': float(img_match.group(3))})
image_list.append(image_data)
content = content.replace(match.group(1), img_match.group(1))
return content, image_list
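# Illustrative example (not in the original source): an image reference written
# as ' =200x100)' in question text has the ' =200x100'
# sizing suffix stripped from the link by the loop above, while image_list gains
# a dict with the rewritten image path plus 'width': 200.0 and 'height': 100.0.
# The '<path>' placeholder here is purely schematic.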
def map_prerequisites(root_node):
for n in ccmodels.PrerequisiteContentRelationship.objects.filter(prerequisite__tree_id=root_node.tree_id)\
.values('prerequisite__node_id', 'target_node__node_id'):
target_node = kolibrimodels.ContentNode.objects.get(pk=n['target_node__node_id'])
target_node.has_prerequisite.add(n['prerequisite__node_id'])
def map_channel_to_kolibri_channel(channel):
logging.debug("Generating the channel metadata.")
channel.icon_encoding = convert_channel_thumbnail(channel)
channel.save()
kolibri_channel = kolibrimodels.ChannelMetadata.objects.create(
id=channel.id,
name=channel.name,
description=channel.description,
version=channel.version + 1, # Need to save as version being published, not current version
thumbnail=channel.icon_encoding,
root_pk=channel.main_tree.node_id,
root_id=channel.main_tree.node_id,
min_schema_version=MIN_SCHEMA_VERSION, # Need to modify Kolibri so we can import this without importing models
)
logging.info("Generated the channel metadata.")
return kolibri_channel
def convert_channel_thumbnail(channel):
""" encode_thumbnail: gets base64 encoding of thumbnail
Args:
thumbnail (str): file path or url to channel's thumbnail
Returns: base64 encoding of thumbnail
"""
if not channel.thumbnail or channel.thumbnail == '' or 'static' in channel.thumbnail:
return ""
if channel.thumbnail_encoding:
try:
thumbnail_data = channel.thumbnail_encoding
if thumbnail_data.get("base64"):
return thumbnail_data["base64"]
except ValueError:
logging.error("ERROR: channel thumbnail is not in correct format ({}: {})".format(channel.id, channel.thumbnail_encoding))
return get_thumbnail_encoding(channel.thumbnail)
def map_tags_to_node(kolibrinode, ccnode):
""" map_tags_to_node: assigns tags to nodes (creates fk relationship)
Args:
kolibrinode (kolibri.models.ContentNode): node to map tag to
ccnode (contentcuration.models.ContentNode): node with tags to map
Returns: None
"""
tags_to_add = []
for tag in ccnode.tags.all():
t, _new = kolibrimodels.ContentTag.objects.get_or_create(pk=tag.pk, tag_name=tag.tag_name)
tags_to_add.append(t)
kolibrinode.tags = tags_to_add
kolibrinode.save()
def prepare_export_database(tempdb):
call_command("flush", "--noinput", database=get_active_content_database()) # clears the db!
call_command("migrate",
"content",
run_syncdb=True,
database=get_active_content_database(),
noinput=True)
logging.info("Prepared the export database.")
def raise_if_nodes_are_all_unchanged(channel):
logging.debug("Checking if we have any changed nodes.")
changed_models = channel.main_tree.get_family().filter(changed=True)
if changed_models.count() == 0:
logging.debug("No nodes have been changed!")
raise EarlyExit(message="No models changed!", db_path=None)
logging.info("Some nodes are changed.")
def mark_all_nodes_as_changed(channel):
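    # Note: despite the name, this clears the `changed` flag and marks the whole
    # main tree as published once the export has completed successfully.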
logging.debug("Marking all nodes as changed.")
channel.main_tree.get_family().update(changed=False, published=True)
logging.info("Marked all nodes as changed.")
def save_export_database(channel_id):
logging.debug("Saving export database")
current_export_db_location = get_active_content_database()
target_export_db_location = os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id))
with open(current_export_db_location) as currentf:
storage.save(target_export_db_location, currentf)
logging.info("Successfully copied to {}".format(target_export_db_location))
def add_tokens_to_channel(channel):
if not channel.secret_tokens.filter(is_primary=True).exists():
logging.info("Generating tokens for the channel.")
channel.make_token()
def fill_published_fields(channel):
published_nodes = channel.main_tree.get_descendants().filter(published=True).prefetch_related('files')
channel.total_resource_count = published_nodes.exclude(kind_id=content_kinds.TOPIC).count()
channel.published_kind_count = json.dumps(list(published_nodes.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id')))
channel.published_size = published_nodes.values('files__checksum', 'files__file_size').distinct(
).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0
node_languages = published_nodes.exclude(language=None).values_list('language', flat=True)
file_languages = published_nodes.values_list('files__language', flat=True)
language_list = list(set(chain(node_languages, file_languages)))
for lang in language_list:
if lang:
channel.included_languages.add(lang)
channel.save()
|
mit
|
jasonwzhy/django
|
django/db/models/lookups.py
|
194
|
16328
|
import inspect
from copy import copy
from django.utils.functional import cached_property
from django.utils.six.moves import range
from .query_utils import QueryWrapper
class RegisterLookupMixin(object):
def _get_lookup(self, lookup_name):
try:
return self.class_lookups[lookup_name]
except KeyError:
# To allow for inheritance, check parent class' class_lookups.
for parent in inspect.getmro(self.__class__):
if 'class_lookups' not in parent.__dict__:
continue
if lookup_name in parent.class_lookups:
return parent.class_lookups[lookup_name]
except AttributeError:
# This class didn't have any class_lookups
pass
return None
def get_lookup(self, lookup_name):
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@classmethod
def register_lookup(cls, lookup):
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup.lookup_name] = lookup
return lookup
@classmethod
def _unregister_lookup(cls, lookup):
"""
Removes given lookup from cls lookups. Meant to be used in
tests only.
"""
del cls.class_lookups[lookup.lookup_name]
class Transform(RegisterLookupMixin):
bilateral = False
def __init__(self, lhs, lookups):
self.lhs = lhs
self.init_lookups = lookups[:]
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def output_field(self):
return self.lhs.output_field
def copy(self):
return copy(self)
def relabeled_clone(self, relabels):
copy = self.copy()
copy.lhs = self.lhs.relabeled_clone(relabels)
return copy
def get_group_by_cols(self):
return self.lhs.get_group_by_cols()
def get_bilateral_transforms(self):
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append((self.__class__, self.init_lookups))
return bilateral_transforms
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate
class Lookup(RegisterLookupMixin):
lookup_name = None
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
            # Warn the user as soon as possible if they are trying to apply
            # a bilateral transformation on a nested QuerySet: that won't work.
            # We need to import QuerySet here so as to avoid a circular import.
from django.db.models.query import QuerySet
if isinstance(rhs, QuerySet):
raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform, lookups in self.bilateral_transforms:
value = transform(value, lookups)
return value
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = QueryWrapper('%s',
[self.lhs.output_field.get_db_prep_value(p, connection)])
value = self.apply_bilateral_transforms(value)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
params = self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, rhs, connection, prepared=True)
sqls, sqls_params = ['%s'] * len(params), params
return sqls, sqls_params
def get_prep_lookup(self):
return self.lhs.output_field.get_prep_lookup(self.lookup_name, self.rhs)
def get_db_prep_lookup(self, value, connection):
return (
'%s', self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, value, connection, prepared=True))
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
return compiler.compile(lhs)
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
# Do not call get_db_prep_lookup here as the value will be
# transformed before being used for lookup
value = QueryWrapper("%s",
[self.lhs.output_field.get_db_prep_value(value, connection)])
value = self.apply_bilateral_transforms(value)
        # For historical reasons there are a couple of different ways to
        # produce SQL here: an object with get_compiler is likely a Query
        # instance, one with _as_sql a QuerySet, and as_sql covers anything
        # else that can compile itself. Finally, the value may simply be a
        # plain Python value.
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql'):
sql, params = compiler.compile(value)
return '(' + sql + ')', params
if hasattr(value, '_as_sql'):
sql, params = value._as_sql(connection=connection)
return '(' + sql + ')', params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
return not(
hasattr(self.rhs, 'as_sql') or
hasattr(self.rhs, '_as_sql') or
hasattr(self.rhs, 'get_compiler'))
def relabeled_clone(self, relabels):
new = copy(self)
new.lhs = new.lhs.relabeled_clone(relabels)
if hasattr(new.rhs, 'relabeled_clone'):
new.rhs = new.rhs.relabeled_clone(relabels)
return new
def get_group_by_cols(self):
cols = self.lhs.get_group_by_cols()
if hasattr(self.rhs, 'get_group_by_cols'):
cols.extend(self.rhs.get_group_by_cols())
return cols
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super(BuiltinLookup, self).process_lhs(
compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(
db_type, field_internal_type) % lhs_sql
lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
return lhs_sql, params
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
default_lookups = {}
class Exact(BuiltinLookup):
lookup_name = 'exact'
default_lookups['exact'] = Exact
class IExact(BuiltinLookup):
lookup_name = 'iexact'
def process_rhs(self, qn, connection):
rhs, params = super(IExact, self).process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
default_lookups['iexact'] = IExact
class GreaterThan(BuiltinLookup):
lookup_name = 'gt'
default_lookups['gt'] = GreaterThan
class GreaterThanOrEqual(BuiltinLookup):
lookup_name = 'gte'
default_lookups['gte'] = GreaterThanOrEqual
class LessThan(BuiltinLookup):
lookup_name = 'lt'
default_lookups['lt'] = LessThan
class LessThanOrEqual(BuiltinLookup):
lookup_name = 'lte'
default_lookups['lte'] = LessThanOrEqual
class In(BuiltinLookup):
lookup_name = 'in'
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable, we use batch_process_rhs
# to prepare/transform those values
rhs = list(self.rhs)
if not rhs:
from django.db.models.sql.datastructures import EmptyResultSet
raise EmptyResultSet
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = '(' + ', '.join(sqls) + ')'
return (placeholder, sqls_params)
else:
return super(In, self).process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return 'IN %s' % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if self.rhs_is_direct_value() and (max_in_list_size and
len(self.rhs) > max_in_list_size):
# This is a special case for Oracle which limits the number of elements
# which can appear in an 'IN' clause.
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ['(']
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(' OR ')
in_clause_elements.append('%s IN (' % lhs)
params.extend(lhs_params)
sqls = rhs[offset: offset + max_in_list_size]
sqls_params = rhs_params[offset: offset + max_in_list_size]
param_group = ', '.join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(')')
params.extend(sqls_params)
in_clause_elements.append(')')
return ''.join(in_clause_elements), params
else:
return super(In, self).as_sql(compiler, connection)
default_lookups['in'] = In
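# Illustrative (not part of this module): on a backend whose max_in_list_size()
# is 3, a direct-value lookup such as pk__in=[1, 2, 3, 4, 5] is compiled by the
# branch above into roughly "(pk IN (%s, %s, %s) OR pk IN (%s, %s))", with the
# five values appended to params.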
class PatternLookup(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
# For python values we can (and should) do that directly in Python,
# but if the value is for example reference to other column, then
# we need to add the % pattern match to the lookup by something like
# col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if (hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql')
or hasattr(self.rhs, '_as_sql') or self.bilateral_transforms):
pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
return pattern.format(rhs)
else:
return super(PatternLookup, self).get_rhs_op(connection, rhs)
class Contains(PatternLookup):
lookup_name = 'contains'
def process_rhs(self, qn, connection):
rhs, params = super(Contains, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['contains'] = Contains
class IContains(Contains):
lookup_name = 'icontains'
default_lookups['icontains'] = IContains
class StartsWith(PatternLookup):
lookup_name = 'startswith'
def process_rhs(self, qn, connection):
rhs, params = super(StartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['startswith'] = StartsWith
class IStartsWith(PatternLookup):
lookup_name = 'istartswith'
def process_rhs(self, qn, connection):
rhs, params = super(IStartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['istartswith'] = IStartsWith
class EndsWith(PatternLookup):
lookup_name = 'endswith'
def process_rhs(self, qn, connection):
rhs, params = super(EndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['endswith'] = EndsWith
class IEndsWith(PatternLookup):
lookup_name = 'iendswith'
def process_rhs(self, qn, connection):
rhs, params = super(IEndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['iendswith'] = IEndsWith
class Between(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs, rhs)
class Range(BuiltinLookup):
lookup_name = 'range'
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of 2 values, we use batch_process_rhs
# to prepare/transform those values
return self.batch_process_rhs(compiler, connection)
else:
return super(Range, self).process_rhs(compiler, connection)
default_lookups['range'] = Range
class IsNull(BuiltinLookup):
lookup_name = 'isnull'
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
default_lookups['isnull'] = IsNull
class Search(BuiltinLookup):
lookup_name = 'search'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.fulltext_search_sql(field_name=lhs)
return sql_template, lhs_params + rhs_params
default_lookups['search'] = Search
class Regex(BuiltinLookup):
lookup_name = 'regex'
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super(Regex, self).as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
default_lookups['regex'] = Regex
class IRegex(Regex):
lookup_name = 'iregex'
default_lookups['iregex'] = IRegex
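# Usage sketch (illustrative, not part of this module): RegisterLookupMixin makes
# it possible to attach custom lookups to fields, following the pattern of the
# built-ins above:
#
#   from django.db.models import CharField
#
#   class NotEqual(Lookup):
#       lookup_name = 'ne'
#
#       def as_sql(self, compiler, connection):
#           lhs, lhs_params = self.process_lhs(compiler, connection)
#           rhs, rhs_params = self.process_rhs(compiler, connection)
#           return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params
#
#   CharField.register_lookup(NotEqual)  # enables field__ne=... in filters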
|
bsd-3-clause
|
kaedroho/wagtail
|
wagtail/core/hooks.py
|
2
|
2294
|
from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn, order=order)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, order))
class TemporaryHook(ContextDecorator):
def __init__(self, hook_name, fn, order):
self.hook_name = hook_name
self.fn = fn
self.order = order
def __enter__(self):
if self.hook_name not in _hooks:
_hooks[self.hook_name] = []
_hooks[self.hook_name].append((self.fn, self.order))
def __exit__(self, exc_type, exc_value, traceback):
_hooks[self.hook_name].remove((self.fn, self.order))
def register_temporarily(hook_name, fn, order=0):
"""
Register hook for ``hook_name`` temporarily. This is useful for testing hooks.
Can be used as a decorator::
def my_hook(...):
pass
    class TestMyHook(TestCase):
@hooks.register_temporarily('hook_name', my_hook)
def test_my_hook(self):
pass
or as a context manager::
def my_hook(...):
pass
with hooks.register_temporarily('hook_name', my_hook):
# Hook is registered here
# Hook is unregistered here
"""
return TemporaryHook(hook_name, fn, order)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
list(get_app_submodules('wagtail_hooks'))
_searched_for_hooks = True
def get_hooks(hook_name):
""" Return the hooks function sorted by their order. """
search_for_hooks()
hooks = _hooks.get(hook_name, [])
hooks = sorted(hooks, key=itemgetter(1))
return [hook[0] for hook in hooks]
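# Usage sketch (added for illustration; not part of the original module).
# A hypothetical hook named 'after_create_page' could be registered and then
# dispatched like this:
#
#     @register('after_create_page', order=10)
#     def notify(request, page):
#         ...
#
#     for fn in get_hooks('after_create_page'):
#         fn(request, page)
#
# Lower ``order`` values run first, because get_hooks() sorts the stored
# (fn, order) tuples by their second element.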
|
bsd-3-clause
|
foreni-packages/hachoir-parser
|
hachoir_parser/file_system/linux_swap.py
|
95
|
3777
|
"""
Linux swap file.
Documentation: Linux kernel source code, files:
- mm/swapfile.c
- include/linux/swap.h
Author: Victor Stinner
Creation date: 25 december 2006 (christmas ;-))
"""
from hachoir_parser import Parser
from hachoir_core.field import (ParserError, GenericVector,
UInt32, String,
Bytes, NullBytes, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.tools import humanFilesize
from hachoir_core.bits import str2hex
PAGE_SIZE = 4096
# Definition of MAX_SWAP_BADPAGES in Linux kernel:
# (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)
MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4
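# With PAGE_SIZE = 4096 this works out to (4096 - 10 - 1536) // 4 = 637
# entries: the magic string occupies the last 10 bytes of the header page,
# and the fields preceding the bad-page list (boot area, version, last_page,
# nb_badpage, the two UUIDs and the reserved block laid out in
# LinuxSwapFile.createFields below) occupy the first 1536 bytes.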
class Page(RawBytes):
static_size = PAGE_SIZE*8
def __init__(self, parent, name):
RawBytes.__init__(self, parent, name, PAGE_SIZE)
class UUID(Bytes):
static_size = 16*8
def __init__(self, parent, name):
Bytes.__init__(self, parent, name, 16)
def createDisplay(self):
text = str2hex(self.value, format=r"%02x")
return "%s-%s-%s-%s-%s" % (
text[:8], text[8:12], text[12:16], text[16:20], text[20:])
class LinuxSwapFile(Parser):
PARSER_TAGS = {
"id": "linux_swap",
"file_ext": ("",),
"category": "file_system",
"min_size": PAGE_SIZE*8,
"description": "Linux swap file",
"magic": (
("SWAP-SPACE", (PAGE_SIZE-10)*8),
("SWAPSPACE2", (PAGE_SIZE-10)*8),
("S1SUSPEND\0", (PAGE_SIZE-10)*8),
),
}
endian = LITTLE_ENDIAN
def validate(self):
magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10)
if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"):
return "Unknown magic string"
if MAX_SWAP_BADPAGES < self["nb_badpage"].value:
return "Invalid number of bad page (%u)" % self["nb_badpage"].value
return True
def getPageCount(self):
"""
Number of pages which can really be used for swapping:
        number of pages minus bad pages minus one page (used for the header)
"""
# -1 because first page is used for the header
return self["last_page"].value - self["nb_badpage"].value - 1
def createDescription(self):
if self["magic"].value == "S1SUSPEND\0":
text = "Suspend swap file version 1"
elif self["magic"].value == "SWAPSPACE2":
text = "Linux swap file version 2"
else:
text = "Linux swap file version 1"
nb_page = self.getPageCount()
return "%s, page size: %s, %s pages" % (
text, humanFilesize(PAGE_SIZE), nb_page)
def createFields(self):
# First kilobyte: boot sectors
yield RawBytes(self, "boot", 1024, "Space for disklabel etc.")
# Header
yield UInt32(self, "version")
yield UInt32(self, "last_page")
yield UInt32(self, "nb_badpage")
yield UUID(self, "sws_uuid")
yield UUID(self, "sws_volume")
yield NullBytes(self, "reserved", 117*4)
# Read bad pages (if any)
count = self["nb_badpage"].value
if count:
if MAX_SWAP_BADPAGES < count:
raise ParserError("Invalid number of bad page (%u)" % count)
yield GenericVector(self, "badpages", count, UInt32, "badpage")
# Read magic
padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True)
if padding:
yield padding
yield String(self, "magic", 10, charset="ASCII")
# Read all pages
yield GenericVector(self, "pages", self["last_page"].value, Page, "page")
# Padding at the end
padding = self.seekBit(self.size, "end_padding", null=True)
if padding:
yield padding
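# Usage sketch (added for illustration; not part of the original module, and
# it assumes hachoir_parser's top-level createParser helper):
#
#     from hachoir_parser import createParser
#     parser = createParser(u"/path/to/swapfile")
#     print parser["last_page"].value, parser.getPageCount()
#
# Field access by name and .value follow the hachoir-core field API already
# used inside this parser.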
|
gpl-2.0
|
ntt-sic/nova
|
nova/compute/api.py
|
1
|
161884
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import notifier
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(notifier.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
                help='Allow migrating a machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
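# Illustration (added comment, not part of the original module): with the
# default multi_instance_display_name_template of '%(name)s-%(uuid)s', an
# instance requested with display name 'web' ends up as 'web-<uuid>'. The
# expansion is plain %-formatting over the keys name, uuid and count, e.g.
#
#     '%(name)s-%(count)d' % {'name': 'web', 'count': 2}   # -> 'web-2'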
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
    started at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
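# Usage note (added comment): API methods below apply this decorator as, for
# example, @check_instance_state(vm_state=[vm_states.STOPPED]) on start(), so
# calling start() on an instance that is not STOPPED raises
# InstanceInvalidState before any RPC is cast.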
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = notifier.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
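    # Worked example (added comment, illustrative values): with quotas of
    # 10 instances / 20 cores / 51200 MB RAM, usages of 2 / 8 / 8192 and a
    # flavor with vcpus=4 and memory_mb=2048, the headroom is instances=8,
    # cores=12, ram=43008, so 'allowed' becomes
    # min(8, 12 // 4, 43008 // 2048) = 3 and the request is retried (or
    # rejected) against that bound.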
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
        Check that each requested security group exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks):
"""
        Check that the requested networks belong to the project
        and that the fixed IP address provided for each network is
        within the same network block.
"""
self.network_api.validate_networks(context, requested_networks)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
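    # Parsing examples (added comment): 'nova:host1' -> ('nova', 'host1',
    # None); 'nova::node1' -> ('nova', None, 'node1'); 'nova:host1:node1' ->
    # ('nova', 'host1', 'node1'); a bare 'nova' is returned unchanged with no
    # forced host or node.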
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.InstanceTypeDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.InstanceTypeDiskTooSmall()
def _check_and_transform_bdm(self, base_options, min_count, max_count,
block_device_mapping, legacy_bdm):
if legacy_bdm:
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, base_options.get('image_ref', ''),
root_device_name)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
self._check_requested_networks(context, requested_networks)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
return base_options
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options = self._validate_and_build_base_options(context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id)
block_device_mapping = self._check_and_transform_bdm(
base_options, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
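        # Example (added comment): boot indexes [0, 1, 2] pass this check;
        # [1, 2] (no index 0) or [0, 2] (a gap) raise InvalidBDMBootSequence.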
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
# NOTE (ndipanov): For now assume that image mapping is legacy
image_bdm = block_device.from_legacy_mapping(
image_properties.get('block_device_mapping', []),
None, instance['root_device_name'])
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping + image_bdm)
for mapping in (image_mapping, image_bdm, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
self.db.instance_destroy(context, instance['uuid'])
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
reservations = None
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance['vm_state'] == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, downsize_reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.InstanceTypeNotFound:
LOG.warning(_("instance type %d not found"),
old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=system_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
instance_type = flavors.extract_flavor(instance)
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=None,
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
if 'all_tenants' in search_opts:
check_policy(context, "get_all_tenants", target)
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
instance_type = flavors.get_flavor_by_flavor_id(
flavor_id)
filters['instance_type_id'] = instance_type['id']
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
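        # Illustration (hypothetical search options): with search_opts of
        # {'name': '^web-', 'flavor': '2', 'fixed_ip': '10.0.0.3'}, the loop
        # below stores filters['display_name'] = '^web-', resolves the flavor
        # to filters['instance_type_id'], and turns the address into the
        # regexp filters['ip'] = '^10\.0\.0\.3$'.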
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, basestring):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Live Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_LIVE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.live_snapshot_instance(context, instance=instance,
image_id=image_meta['id'])
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
"""
props_copy = dict(extra_properties, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=None)
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the snapshot or backup
        :param image_type: snapshot | backup
        :param extra_properties: dict of extra image properties to include
            (for backups this carries backup_type, e.g. daily or weekly)
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
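    # Rough shape of the image entry created by _create_image (illustrative
    # values only): {'name': <name>, 'is_public': False,
    # 'properties': {'instance_uuid': ..., 'user_id': ...,
    # 'image_type': 'snapshot', ...}}. Keys in extra_properties override the
    # base keys set in the method, and the combined dict overrides any
    # properties inherited from the source image via get_image_metadata().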
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
                # Linux LVM snapshot creation completes in a short
                # time, so it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
bdm['volume_id'] = None
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
            # So disable those devices via no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
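    # Illustration (hypothetical ids): a bdm of {'device_name': '/dev/vda',
    # 'volume_id': 'vol-1', 'no_device': False} ends up in the image's
    # block_device_mapping as {'device_name': '/dev/vda', 'volume_id': None,
    # 'snapshot_id': 'snap-1'}, so booting the image creates a new volume
    # from that snapshot rather than reusing the original volume.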
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] in [vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED,
vm_states.ERROR])):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
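    # Note on the save() above: expected_task_state=[None, REBOOTING] lets a
    # HARD reboot preempt an in-progress SOFT reboot, while the earlier
    # checks already reject stacking a reboot of the same type.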
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance['image_ref'] or ''
files_to_inject = kwargs.pop('files_to_inject', [])
metadata = kwargs.get('metadata', {})
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
instance_type = flavors.extract_flavor(instance)
self._checks_for_create_and_rebuild(context, image_id, image,
instance_type, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that
# if the system_metadata for this instance is updated
# after we do the get and before we update.. those other
# updates will be lost. Since this problem exists in a lot
# of other places, I think it should be addressed in a DB
# layer overhaul.
sys_metadata = self.db.instance_system_metadata_get(context,
instance['uuid'])
orig_sys_metadata = dict(sys_metadata)
# Remove the old keys
for key in sys_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del sys_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, instance_type)
sys_metadata.update(new_sys_metadata)
self.db.instance_system_metadata_update(context,
instance['uuid'], sys_metadata, True)
return orig_sys_metadata
instance = self.update(context, instance,
task_state=task_states.REBUILDING,
expected_task_state=None,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href, kernel_id=kernel_id or "",
ramdisk_id=ramdisk_id or "",
progress=0, **kwargs)
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance['uuid']))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
reservations = self._reserve_quota_delta(context, deltas)
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=None)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
reservations)
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_instance_type[resource] -
old_instance_type[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
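    # Worked example (hypothetical flavors): going from 2 vcpus/4096 MB to
    # 4 vcpus/8192 MB with sense=1, compare=1 gives
    # {'cores': 2, 'ram': 4096}, the extra quota to reserve up front.
    # Reversing that same upsize with sense=-1, compare=-1 gives
    # {'cores': -2, 'ram': -4096}, handing the quota back.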
@staticmethod
def _upsize_quota_delta(context, new_instance_type, old_instance_type):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_instance_type = flavors.get_flavor(
migration_ref['old_instance_type_id'])
new_instance_type = flavors.get_flavor(
migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_instance_type = flavors.extract_flavor(instance,
'old_')
new_instance_type = flavors.extract_flavor(instance,
'new_')
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@staticmethod
def _resize_cells_support(context, reservations, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
if reservations:
# With cells, the best we can do right now is commit the
# reservations immediately...
QUOTAS.commit(context, reservations,
project_id=instance.project_id)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
# FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=None)
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
        # When flavor_id is None the operation is a migration, so the current
        # host is also excluded unless allow_migrate_to_same_host is set.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, reservations, instance,
current_instance_type,
new_instance_type)
reservations = []
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, reservations=reservations)
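    # Typical callers (illustrative): a resize API action passes a flavor_id,
    # while the admin migrate action calls resize() with no flavor_id, which
    # keeps the current flavor and only moves the instance to another host.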
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=None)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
        # If the instance is already locked, only an admin (non-owner)
        # may update the lock.
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
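    # Net effect of lock()/unlock() above: an admin may escalate an owner
    # lock to locked_by='admin', and unlocking requires the
    # 'unlock_override' policy whenever the caller's role (owner vs admin)
    # does not match locked_by.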
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
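    # Flow summary for attach_volume: the device name is reserved on the
    # compute host first (to avoid races between concurrent attaches), then
    # the volume is validated and reserved in the volume service, and the
    # attach is cast to the compute manager; any failure after the
    # reservation destroys the placeholder bdm created above.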
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
        it easier for the cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
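        # Net effect of the branches above: when both key and value filters
        # are given, an item is kept only if both match; when only one kind
        # of filter is given, only that one has to match. Successive entries
        # in search_filts are then ANDed together by the loop below.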
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = utils.diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance, legacy=False)
root_bdm = block_device.get_root_bdm(bdms)
if root_bdm and root_bdm.get('destination_type') == 'volume':
return True
return False
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=None)
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=None,
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm['instance'],
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm['instance'],
volume_id, snapshot_id, delete_info)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_power_action(context, action=action,
host=host_name)
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
        attributes and matching values. E.g., to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.update_metadata(metadata)
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
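    # Illustration (hypothetical zone names): adding a host whose current
    # availability zone is 'zone1' to an aggregate whose metadata pins
    # 'zone2' raises InvalidAggregateAction, since a host may only belong
    # to one explicit availability zone at a time.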
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = notifier.get_notifier(service='api')
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
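    # Illustration (hypothetical names): 'my-key_1' passes the character
    # whitelist above, while 'my@key' fails it because '@' is stripped and
    # the cleaned value no longer equals the original name; names must also
    # be 1-255 characters long and within the key_pairs quota.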
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
    and security group rules.
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
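    # Illustrative call only; the name value and the allowed pattern below are
    # assumptions for demonstration, not the exact expression used by API callers:
    #
    #   self.validate_property('web-servers', 'name', r'^[a-zA-Z0-9_\- ]+$')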
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
group_ref = self.db.security_group_update(context,
security_group['id'],
group)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
|
apache-2.0
|
kost/volatility
|
volatility/plugins/mac/moddump.py
|
12
|
4739
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import re
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.common as common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_moddump(common.AbstractMacCommand):
""" Writes the specified kernel extension to disk """
def __init__(self, config, *args, **kwargs):
common.AbstractMacCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('BASE', short_option = 'b', default = None, help = 'Dump driver with BASE address (in hex)', action = 'store', type = 'int')
self._config.add_option('REGEX', short_option = 'r', help = 'Dump modules matching REGEX', action = 'store', type = 'string')
self._config.add_option('IGNORE-CASE', short_option = 'i', help = 'Ignore case in pattern match', action = 'store_true', default = False)
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'Output directory', action = 'store', type = 'str')
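    # Hypothetical invocation, assuming the standard vol.py entry point and a made-up
    # profile/image name; shown only to illustrate how the options declared above
    # (-b, -r, -i, -D) fit together:
    #
    #   python vol.py --profile=MacMountainLion_10_8_3_AMDx64 -f mem.dmp \
    #       mac_moddump -r Hypervisor -i -D /tmp/kexts/
    #
    # A single extension can instead be selected by load address with -b 0xffffff7f80a00000.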
def calculate(self):
common.set_plugin_members(self)
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: {0}'.format(e))
if self._config.BASE:
module_address = int(self._config.BASE)
yield obj.Object("kmod_info", offset = module_address, vm = self.addr_space)
else:
modules_addr = self.addr_space.profile.get_symbol("_kmod")
modules_ptr = obj.Object("Pointer", vm = self.addr_space, offset = modules_addr)
mod = modules_ptr.dereference_as("kmod_info")
while mod.is_valid():
if self._config.REGEX and not mod_re.search(str(mod.name)):
mod = mod.next
continue
yield mod
mod = mod.next
def unified_output(self, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
return TreeGrid([("Address", Address),
("Size", int),
("Output Path", str),
], self.generator(data))
def generator(self, data):
for kmod in data:
start = kmod.address
size = kmod.m("size")
file_name = "{0}.{1:#x}.kext".format(kmod.name, kmod.obj_offset)
mod_file = open(os.path.join(self._config.DUMP_DIR, file_name), 'wb')
mod_data = self.addr_space.zread(kmod.address, size)
mod_file.write(mod_data)
mod_file.close()
yield(0, [
Address(start),
int(size),
str(file_name),
])
def render_text(self, outfd, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
self.table_header(outfd, [("Address", "[addrpad]"),
("Size", "8"),
("Output Path", "")])
for kmod in data:
start = kmod.address
size = kmod.m("size")
file_name = "{0}.{1:#x}.kext".format(kmod.name, kmod.obj_offset)
mod_file = open(os.path.join(self._config.DUMP_DIR, file_name), 'wb')
mod_data = self.addr_space.zread(kmod.address, size)
mod_file.write(mod_data)
mod_file.close()
self.table_row(outfd, start, size, file_name)
|
gpl-2.0
|
arruah/ensocoin
|
contrib/testgen/gen_base58_test_vectors.py
|
1000
|
4343
|
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
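# Illustrative sketch only (never called by the generator): shows how one template row
# expands into a Base58Check string. The all-zero 20-byte payload is made up.
def _example_template_encoding():
    prefix = str(bytearray((PUBKEY_ADDRESS,)))
    payload = '\x00' * 20
    # b58encode_chk appends a 4-byte double-SHA256 checksum before Base58-encoding
    return b58encode_chk(prefix + payload)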
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
|
mit
|
RobberPhex/thrift
|
tutorial/py.tornado/PythonServer.py
|
34
|
2779
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import sys
sys.path.append('gen-py.tornado')
sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])
from tutorial import Calculator
from tutorial.ttypes import Operation, InvalidOperation
from shared.ttypes import SharedStruct
from thrift import TTornado
from thrift.protocol import TBinaryProtocol
from tornado import ioloop
class CalculatorHandler(object):
def __init__(self):
self.log = {}
def ping(self):
print("ping()")
def add(self, n1, n2):
print("add({}, {})".format(n1, n2))
return n1 + n2
def calculate(self, logid, work):
print("calculate({}, {})".format(logid, work))
if work.op == Operation.ADD:
val = work.num1 + work.num2
elif work.op == Operation.SUBTRACT:
val = work.num1 - work.num2
elif work.op == Operation.MULTIPLY:
val = work.num1 * work.num2
elif work.op == Operation.DIVIDE:
if work.num2 == 0:
x = InvalidOperation()
x.whatOp = work.op
x.why = "Cannot divide by 0"
raise x
val = work.num1 / work.num2
else:
x = InvalidOperation()
x.whatOp = work.op
x.why = "Invalid operation"
raise x
log = SharedStruct()
log.key = logid
log.value = '%d' % (val)
self.log[logid] = log
return val
def getStruct(self, key):
print("getStruct({})".format(key))
return self.log[key]
def zip(self):
print("zip()")
def main():
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TTornado.TTornadoServer(processor, pfactory)
print("Starting the server...")
server.bind(9090)
server.start(1)
ioloop.IOLoop.instance().start()
print("done.")
if __name__ == "__main__":
main()
|
apache-2.0
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/google/cloud/gapic/errorreporting/v1beta1/error_group_service_client.py
|
1
|
10469
|
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ErrorGroupService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.errorreporting.v1beta1 import enums
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import error_group_service_pb2
class ErrorGroupServiceClient(object):
"""Service for retrieving and updating individual error groups."""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
_GROUP_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/groups/{group}')
@classmethod
def group_path(cls, project, group):
"""Returns a fully-qualified group resource name string."""
return cls._GROUP_PATH_TEMPLATE.render({
'project': project,
'group': group,
})
@classmethod
def match_project_from_group_name(cls, group_name):
"""Parses the project from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the project.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('project')
@classmethod
def match_group_from_group_name(cls, group_name):
"""Parses the group from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the group.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('group')
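    # Illustrative only, following the _GROUP_PATH_TEMPLATE defined above; the project
    # and group values are made up:
    #
    #   >>> ErrorGroupServiceClient.group_path('my-project-123', 'my-group')
    #   'projects/my-project-123/groups/my-group'
    #   >>> ErrorGroupServiceClient.match_project_from_group_name(
    #   ...     'projects/my-project-123/groups/my-group')
    #   'my-project-123'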
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A ErrorGroupServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-error-reporting-v1beta1', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'error_group_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.devtools.clouderrorreporting.v1beta1.ErrorGroupService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.error_group_service_stub = config.create_stub(
error_group_service_pb2.ErrorGroupServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._get_group = api_callable.create_api_call(
self.error_group_service_stub.GetGroup,
settings=defaults['get_group'])
self._update_group = api_callable.create_api_call(
self.error_group_service_stub.UpdateGroup,
settings=defaults['update_group'])
# Service calls
def get_group(self, group_name, options=None):
"""
Get the specified group.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group_name = api.group_path('[PROJECT]', '[GROUP]')
>>> response = api.get_group(group_name)
Args:
group_name (string): [Required] The group resource name. Written as
<code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
Call
<a href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\">
<code>groupStats.list</code></a> to return a list of groups belonging to
this project.
Example: <code>projects/my-project-123/groups/my-group</code>
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.GetGroupRequest(
group_name=group_name)
return self._get_group(request, options)
def update_group(self, group, options=None):
"""
Replace the data for the specified group.
Fails if the group does not exist.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group = common_pb2.ErrorGroup()
>>> response = api.update_group(group)
Args:
group (:class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup`): [Required] The group which replaces the resource on the server.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.UpdateGroupRequest(group=group)
return self._update_group(request, options)
|
mit
|
AZCompTox/AZOrange
|
orange/OrangeWidgets/Data/OWAddClass.py
|
2
|
2879
|
"""
<name>Add Class</name>
<description>Adds a new class to a dataset</description>
<icon>icons/AddClass.png</icon>
<priority>1111</priority>
<contact>Pedro Rafael Almeida (engpedrorafael(@at@)gmail.com)</contact>
"""
from OWWidget import *
import OWGUI
from AZutilities import dataUtilities
import types
class OWAddClass(OWWidget):
minReqVer = 4
def __init__(self,parent=None, signalManager = None):
OWWidget.__init__(self, parent, signalManager, "FeatureConstructor",wantMainArea = 0)
self.inputs = [("Examples", ExampleTable, self.setData)]
self.outputs = [("Examples", ExampleTable)]
self.newName = "NEW_CLASS"
self.value = "?"
self.values = "[]"
self.data = None
box = OWGUI.widgetBox(self.controlArea,"Add discrete class")
newNameBox = OWGUI.lineEdit(box, self, "newName", "Class name: ", labelWidth=70, orientation="horizontal", tooltip="Name of the new Class attribute to be added")
valueBox = OWGUI.lineEdit(box, self, "value", "Value: ", labelWidth=70, orientation="horizontal", tooltip="Class value to be assigned to each example")
#valuesBox = OWGUI.lineEdit(box, self, "values", "Class values: ", labelWidth=70, orientation="horizontal", tooltip='Optional, list of possible values of new discrete class attribute ex: ["POS", "NEG"]')
self.BT = OWGUI.button(self.controlArea, self, "&Apply", callback = self.apply, disabled=0)
infoBox = OWGUI.widgetBox(self,"Info")
OWGUI.widgetLabel(infoBox, 'Adds a new discrete response variable\nto the data and gives all examples the same value.')
self.adjustSize()
def setData(self, data):
self.data = data
self.error(0)
self.warning(0)
if not data:
self.send("Examples", None)
else:
self.apply()
def apply(self):
self.error(0)
self.warning(0)
try:
            values = eval(self.values)
            if type(values) != types.ListType:
                raise ValueError("Wrong values")
            else:
                for v in values:
                    if type(v) not in types.StringTypes:
                        raise ValueError("Wrong values")
except:
values = []
self.warning(0,'Invalid values list. Usage: ["A","B","C"]')
#self.newName = self.newName.upper()
newTable = dataUtilities.addDiscreteClass(self.data, self.newName,self.value,values)
if not newTable:
            self.error(0,'It was not possible to add the attribute the way you specified. Please check the options again!')
elif newTable.domain.classVar.name != self.newName:
            self.warning(0,'There was already an attribute named '+ self.newName+'; the new class was created with name '+newTable.domain.classVar.name)
self.send("Examples", newTable)
|
lgpl-3.0
|
DataDog/python-openid
|
examples/djopenid/settings.py
|
66
|
2601
|
# Django settings for djopenid project.
import os
import sys
import warnings
try:
import openid
except ImportError, e:
warnings.warn("Could not import OpenID library. Please consult the djopenid README.")
sys.exit(1)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '/tmp/test.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'u^bw6lmsa6fah0$^lz-ct$)y7x7#ag92-z+y45-8!(jk0lkavy'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'djopenid.urls'
TEMPLATE_CONTEXT_PROCESSORS = ()
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')),
)
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.sessions',
'djopenid.consumer',
'djopenid.server',
)
|
apache-2.0
|
wskplho/sl4a
|
python/src/Lib/sgmllib.py
|
306
|
17884
|
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
As an alternative to overriding this method; one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
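# A minimal illustration (not part of the original module) of the handler convention
# described above: start_<tag>/end_<tag> for container tags, do_<tag> for standalone
# tags. The tag names chosen here are arbitrary.
class _ExampleTitleParser(SGMLParser):
    def reset(self):
        SGMLParser.reset(self)
        self.in_title = 0
        self.title_parts = []
    def start_title(self, attrs):
        self.in_title = 1
    def end_title(self):
        self.in_title = 0
    def do_br(self, attrs):
        # standalone tag: no end_br is expected
        pass
    def handle_data(self, data):
        if self.in_title:
            self.title_parts.append(data)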
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
|
apache-2.0
|
kmoocdev2/edx-platform
|
openedx/features/course_experience/views/course_outline.py
|
1
|
5344
|
"""
Views to show a course outline.
"""
import re
import datetime
from completion import waffle as completion_waffle
from django.contrib.auth.models import User
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from waffle.models import Switch
from web_fragments.fragment import Fragment
from courseware.courses import get_course_overview_with_access
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from student.models import CourseEnrollment
from util.milestones_helpers import get_course_content_milestones
from xmodule.modulestore.django import modulestore
from ..utils import get_course_outline_block_tree, get_resume_block
DEFAULT_COMPLETION_TRACKING_START = datetime.datetime(2018, 1, 24, tzinfo=UTC)
class CourseOutlineFragmentView(EdxFragmentView):
"""
Course outline fragment to be shown in the unified course view.
"""
def render_to_fragment(self, request, course_id=None, page_context=None, **kwargs):
"""
Renders the course outline as a fragment.
"""
course_key = CourseKey.from_string(course_id)
course_overview = get_course_overview_with_access(request.user, 'load', course_key, check_if_enrolled=True)
course = modulestore().get_course(course_key)
course_block_tree = get_course_outline_block_tree(request, course_id)
if not course_block_tree:
return None
context = {
'csrf': csrf(request)['csrf_token'],
'course': course_overview,
'due_date_display_format': course.due_date_display_format,
'blocks': course_block_tree
}
resume_block = get_resume_block(course_block_tree)
if not resume_block:
self.mark_first_unit_to_resume(course_block_tree)
xblock_display_names = self.create_xblock_id_and_name_dict(course_block_tree)
gated_content = self.get_content_milestones(request, course_key)
context['gated_content'] = gated_content
context['xblock_display_names'] = xblock_display_names
html = render_to_string('course_experience/course-outline-fragment.html', context)
return Fragment(html)
def create_xblock_id_and_name_dict(self, course_block_tree, xblock_display_names=None):
"""
Creates a dictionary mapping xblock IDs to their names, using a course block tree.
"""
if xblock_display_names is None:
xblock_display_names = {}
if course_block_tree.get('id'):
xblock_display_names[course_block_tree['id']] = course_block_tree['display_name']
if course_block_tree.get('children'):
for child in course_block_tree['children']:
self.create_xblock_id_and_name_dict(child, xblock_display_names)
return xblock_display_names
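    # Shape of the mapping built above; the usage keys and display names below are
    # made up for illustration:
    #
    #   {
    #       'block-v1:OrgX+CS101+2018+type@chapter+block@week1': u'Week 1',
    #       'block-v1:OrgX+CS101+2018+type@sequential+block@intro': u'Introduction',
    #   }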
def get_content_milestones(self, request, course_key):
"""
Returns dict of subsections with prerequisites and whether the prerequisite has been completed or not
"""
def _get_key_of_prerequisite(namespace):
return re.sub('.gating', '', namespace)
all_course_milestones = get_course_content_milestones(course_key)
uncompleted_prereqs = {
milestone['content_id']
for milestone in get_course_content_milestones(course_key, user_id=request.user.id)
}
gated_content = {
milestone['content_id']: {
'completed_prereqs': milestone['content_id'] not in uncompleted_prereqs,
'prerequisite': _get_key_of_prerequisite(milestone['namespace'])
}
for milestone in all_course_milestones
}
return gated_content
def user_enrolled_after_completion_collection(self, user, course_key):
"""
Checks that the user has enrolled in the course after 01/24/2018, the date that
the completion API began data collection. If the user has enrolled in the course
before this date, they may see incomplete collection data. This is a temporary
check until all active enrollments are created after the date.
"""
user = User.objects.get(username=user)
try:
user_enrollment = CourseEnrollment.objects.get(
user=user,
course_id=course_key,
is_active=True
)
return user_enrollment.created > self._completion_data_collection_start()
except CourseEnrollment.DoesNotExist:
return False
def _completion_data_collection_start(self):
"""
Returns the date that the ENABLE_COMPLETION_TRACKING waffle switch was enabled.
"""
# pylint: disable=protected-access
switch_name = completion_waffle.waffle()._namespaced_name(completion_waffle.ENABLE_COMPLETION_TRACKING)
try:
return Switch.objects.get(name=switch_name).created
except Switch.DoesNotExist:
return DEFAULT_COMPLETION_TRACKING_START
def mark_first_unit_to_resume(self, block_node):
children = block_node.get('children')
if children:
children[0]['resume_block'] = True
self.mark_first_unit_to_resume(children[0])
|
agpl-3.0
|
marcreyesph/scancode-toolkit
|
tests/licensedcode/test_detect.py
|
3
|
47823
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from unittest.case import skip
from commoncode.testcase import FileBasedTesting
from licensedcode import index
from licensedcode.match import LicenseMatch
from licensedcode.match import get_texts
from licensedcode import models
from licensedcode.models import Rule
from licensedcode.spans import Span
from licensedcode import match_aho
from licensedcode import match_seq
from license_test_utils import print_matched_texts
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
"""
Test the core license detection mechanics.
"""
class TestIndexMatch(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_does_not_return_matches_for_empty_query(self):
idx = index.LicenseIndex([Rule(_text='A one. A two. license A three.')])
matches = idx.match(query_string='')
assert [] == matches
matches = idx.match(query_string=None)
assert [] == matches
def test_match_does_not_return_matches_for_junk_queries(self):
idx = index.LicenseIndex([Rule(_text='A one. a license two. license A three.')])
assert [] == idx.match(query_string=u'some other junk')
assert [] == idx.match(query_string=u'some junk')
def test_match_return_one_match_with_correct_offsets(self):
idx = index.LicenseIndex([Rule(_text='A one. a license two. A three.', licenses=['abc'])])
querys = u'some junk. A one. A license two. A three.'
# 0 1 2 3 4 5 6 7 8
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=querys, idx=idx)
assert 'A one A license two A three' == qtext
assert 'A one a license two A three' == itext
assert Span(0, 6) == match.qspan
assert Span(0, 6) == match.ispan
def test_match_can_match_exactly_rule_text_used_as_query(self):
test_file = self.get_test_loc('detect/mit/mit.c')
rule = Rule(text_file=test_file, licenses=['mit'])
idx = index.LicenseIndex([rule])
matches = idx.match(test_file)
assert 1 == len(matches)
match = matches[0]
assert rule == match.rule
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
assert 100 == match.coverage()
assert 100 == match.score()
def test_match_matches_correctly_simple_exact_query_1(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/mit2.c')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
assert ftr == match.rule
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/mit3.c')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, location=query_doc, idx=idx)
expected_qtext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in THE SOFTWARE WITHOUT RESTRICTION INCLUDING WITHOUT LIMITATION THE
RIGHTS TO USE COPY MODIFY MERGE PUBLISH DISTRIBUTE SUBLICENSE AND OR SELL
COPIES of the Software and to permit persons to whom the Software is
furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in the Software without restriction including without limitation
the rights to use copy modify merge publish distribute sublicense and or
sell copies of the Software and to permit persons to whom the Software
is furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_itext == itext.split()
def test_match_with_surrounding_junk_should_return_an_exact_match(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_loc = self.get_test_loc('detect/mit/mit4.c')
matches = idx.match(query_loc)
assert len(matches) == 1
match = matches[0]
qtext, itext = get_texts(match, location=query_loc, idx=idx)
expected_qtext = u'''
Permission [add] [text] is hereby granted free of charge to any person
obtaining a copy of this software and associated documentation files the
Software to deal in the Software without restriction including without
limitation the rights to use copy modify merge publish distribute
sublicense and or sell copies of the Software and to permit persons to
whom the Software is furnished to do so subject to the following
conditions The above copyright [add] [text] notice and this permission
notice shall be included in all copies or substantial portions of the
Software
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in the Software without restriction including without limitation the
rights to use copy modify merge publish distribute sublicense and or sell
copies of the Software and to permit persons to whom the Software is
furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_itext == itext.split()
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
assert 100 == match.score()
def test_match_can_match_approximately(self):
rule_file = self.get_test_loc('approx/mit/mit.c')
rule = Rule(text_file=rule_file, licenses=['mit'])
idx = index.LicenseIndex([rule])
query_doc = self.get_test_loc('approx/mit/mit4.c')
matches = idx.match(query_doc)
assert 2 == len(matches)
m1 = matches[0]
m2 = matches[1]
assert rule == m1.rule
assert rule == m2.rule
assert 100 == m1.coverage()
assert 100 == m2.coverage()
assert 100 == m1.score()
assert 100 == m2.score()
def test_match_return_correct_positions_with_short_index_and_queries(self):
idx = index.LicenseIndex([Rule(_text='MIT License', licenses=['mit'])])
matches = idx.match(query_string='MIT License')
assert 1 == len(matches)
assert {'_tst_11_0': {'mit': [0]}} == idx.to_dict()
qtext, itext = get_texts(matches[0], query_string='MIT License', idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
matches = idx.match(query_string='MIT MIT License')
assert 1 == len(matches)
qtext, itext = get_texts(matches[0], query_string='MIT MIT License', idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(1, 2) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
query_doc1 = 'do you think I am a mit license MIT License, yes, I think so'
# # 0 1 2 3
matches = idx.match(query_string=query_doc1)
assert 2 == len(matches)
qtext, itext = get_texts(matches[0], query_string=query_doc1, idx=idx)
assert 'mit license' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
qtext, itext = get_texts(matches[1], query_string=query_doc1, idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(2, 3) == matches[1].qspan
assert Span(0, 1) == matches[1].ispan
query_doc2 = '''do you think I am a mit license
MIT License
yes, I think so'''
matches = idx.match(query_string=query_doc2)
assert 2 == len(matches)
qtext, itext = get_texts(matches[0], query_string=query_doc2, idx=idx)
assert 'mit license' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
qtext, itext = get_texts(matches[1], query_string=query_doc2, idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(2, 3) == matches[1].qspan
assert Span(0, 1) == matches[1].ispan
def test_match_simple_rule(self):
tf1 = self.get_test_loc('detect/mit/t1.txt')
ftr = Rule(text_file=tf1, licenses=['bsd-original'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/t2.txt')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 241) == match.qspan
assert Span(0, 241) == match.ispan
assert (1, 27,) == match.lines()
assert 100 == match.coverage()
assert 100 == match.score()
def test_match_works_with_special_characters_1(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_2(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos1.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_3(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos2.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_4(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos3.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_overlap_detection1(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# * License texts to detect:
# +- license 3 -----------+
# | +-license 2 --------+ |
# | | +-license 1 --+ | |
# | +-------------------+ |
# +-----------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
license3 = '''
this license source
Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.
has a permitted license'''
license4 = '''My Redistributions is permitted.
Redistribution and use permitted.
Use is permitted too.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
rule3 = Rule(_text=license3, licenses=['overlap'])
rule4 = Rule(_text=license4, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2, rule3, rule4])
querys = 'Redistribution and use bla permitted.'
# test : license1 is in the index and contains no other rule. should return rule1 at exact coverage.
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 3) == match.qspan
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection2(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
# test : license2 contains license1, but the query holds only license1's text: return rule1 as an exact match
querys = 'Redistribution and use bla permitted.'
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection2_exact(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
# test : license2 contains license1, but the query holds only license1's text: return rule1 as an exact match
querys = 'Redistribution and use bla permitted.'
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection3(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# * License texts to detect:
# +- license 3 -----------+
# | +-license 2 --------+ |
# | | +-license 1 --+ | |
# | +-------------------+ |
# +-----------------------+
#
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.
My code.'''
# test : querys contains license2 that contains license1: return license2 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule2 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
expected = '''
Redistributions of source must retain copyright
Redistribution and use permitted
Redistributions in binary form is permitted'''.split()
assert expected == qtext.split()
def test_overlap_detection4(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistribution and use permitted.
My code.'''
# test : querys contains license1: return license1 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use permitted' == qtext
def test_overlap_detection5(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted for MIT license.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted for MIT license.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistribution and use permitted for MIT license.
My code.'''
# test : querys contains license1: return license1 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use permitted for MIT license' == qtext
def test_fulltext_detection_works_with_partial_overlap_from_location(self):
test_doc = self.get_test_loc('detect/templates/license3.txt')
idx = index.LicenseIndex([Rule(text_file=test_doc, licenses=['mylicense'])])
query_loc = self.get_test_loc('detect/templates/license4.txt')
matches = idx.match(query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 41) == match.qspan
assert Span(0, 41) == match.ispan
assert 100 == match.coverage()
assert 100 == match.score()
qtext, _itext = get_texts(match, location=query_loc, idx=idx)
expected = '''
is free software you can redistribute it and or modify it under the terms
of the GNU Lesser General Public License as published by the Free
Software Foundation either version 2 1 of the License or at your option
any later version
'''.split()
assert expected == qtext.split()
class TestIndexMatchWithTemplate(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_can_match_with_plain_rule_simple(self):
tf1_text = u'''X11 License
Copyright (C) 1996 X Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright
notice and this permission notice shall be included in all copies or
substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS",
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the
name of the X Consortium shall not be used in advertising or otherwise to
promote the sale, use or other dealings in this Software without prior
written authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=tf1_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
matches = idx.match(query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 216) == match.qspan
def test_match_can_match_with_plain_rule_simple2(self):
rule_text = u'''X11 License
Copyright (C) 1996 X Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright
notice and this permission notice shall be included in all copies or
substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS",
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the
name of the X Consortium shall not be used in advertising or otherwise to
promote the sale, use or other dealings in this Software without prior
written authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
matches = idx.match(location=query_loc)
assert 1 == len(matches)
expected_qtext = u'''
X11 License Copyright C 1996 X Consortium Permission is hereby granted free
of charge to any person obtaining a copy of this software and associated
documentation files the Software to deal in the Software without restriction
including without limitation the rights to use copy modify merge publish
distribute sublicense and or sell copies of the Software and to permit
persons to whom the Software is furnished to do so subject to the following
conditions The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software THE SOFTWARE
IS PROVIDED AS IS WITHOUT WARRANTY OF ANY KIND EXPRESS OR IMPLIED INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR
ANY CLAIM DAMAGES OR OTHER LIABILITY WHETHER IN AN ACTION OF CONTRACT TORT OR
OTHERWISE ARISING FROM OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE Except as contained in this notice the name
of the X Consortium shall not be used in advertising or otherwise to promote
the sale use or other dealings in this Software without prior written
authorization from the X Consortium X Window System is a trademark of X
Consortium Inc
'''.split()
match = matches[0]
qtext, _itext = get_texts(match, location=query_loc, idx=idx)
assert expected_qtext == qtext.split()
def test_match_can_match_with_simple_rule_template2(self):
rule_text = u'''
IN NO EVENT SHALL THE {{X CONSORTIUM}}
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_string = u'''
IN NO EVENT SHALL THE Y CORP
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
matches = idx.match(query_string=query_string)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=query_string, idx=idx)
expected_qtokens = u'''
IN NO EVENT SHALL THE [Y] [CORP] BE LIABLE FOR ANY CLAIM DAMAGES OR OTHER
LIABILITY WHETHER IN AN ACTION OF CONTRACT TORT OR OTHERWISE ARISING FROM OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
'''.split()
expected_itokens = u'''
IN NO EVENT SHALL THE BE LIABLE FOR ANY CLAIM DAMAGES OR OTHER LIABILITY
WHETHER IN AN ACTION OF CONTRACT TORT OR OTHERWISE ARISING FROM OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
'''.split()
assert expected_qtokens == qtext.split()
assert expected_itokens == itext.split()
def test_match_can_match_with_rule_template_with_inter_gap_of_2(self):
# in this template text there are only 2 tokens between the two template markers
test_text = u'''Redistributions in binary form must
{{}} reproduce the {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce the word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 100 == match.coverage()
assert 50 == match.score()
assert Span(0, 9) == match.qspan
assert Span(0, 9) == match.ispan
def test_match_can_match_with_rule_template_with_inter_gap_of_3(self):
# in this template there are 3 tokens between the two template markers
test_text = u'''Redistributions in binary form must
{{}} reproduce the stipulated {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce the stipulated word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 100 == match.coverage()
assert 55 == match.score()
assert Span(0, 10) == match.qspan
assert Span(0, 10) == match.ispan
def test_match_can_match_with_rule_template_with_inter_gap_of_4(self):
# in this template there are 4 tokens between the two template markers
test_text = u'''Redistributions in binary form must
{{}} reproduce as is stipulated {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce as is stipulated the word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 11) == match.qspan
assert Span(0, 11) == match.ispan
def test_match_can_match_with_rule_template_for_public_domain(self):
test_text = '''
I hereby abandon any property rights to {{SAX 2.0 (the Simple API for
XML)}}, and release all of {{the SAX 2.0 }} source code, compiled code,
and documentation contained in this distribution into the Public Domain.
'''
rule = Rule(_text=test_text, licenses=['public-domain'])
idx = index.LicenseIndex([rule])
querys = '''
SAX2 is Free!
I hereby abandon any property rights to SAX 2.0 (the Simple API for
XML), and release all of the SAX 2.0 source code, compiled code, and
documentation contained in this distribution into the Public Domain. SAX
comes with NO WARRANTY or guarantee of fitness for any purpose.
SAX2 is Free!
'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=querys, idx=idx)
expected_qtext = u'''
I hereby abandon any property rights to [SAX] [2] [0] <the> [Simple] [API] [for] [XML]
<and> <release> <all> <of> <the> [SAX] [2] [0]
source code compiled code and documentation contained in this distribution
into the Public Domain
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
I hereby abandon any property rights to
<and> <release> <all> <of>
source code compiled code and documentation contained in this distribution
into the Public Domain
'''.split()
assert expected_itext == itext.split()
assert 80 < match.coverage()
assert 84 == match.score()
assert Span(0, 6) | Span(13, 26) == match.qspan
assert Span(0, 6) | Span(11, 24) == match.ispan
def test_match_can_match_with_rule_template_with_gap_near_start_with_few_tokens_before(self):
# failed when a gapped token starts at the beginning of a rule with few tokens before it
test_file = self.get_test_loc('detect/templates/license7.txt')
rule = Rule(text_file=test_file, licenses=['lic'])
idx = index.LicenseIndex([rule])
qloc = self.get_test_loc('detect/templates/license8.txt')
matches = idx.match(qloc)
assert 1 == len(matches)
match = matches[0]
expected_qtokens = u"""
All Rights Reserved Redistribution and use of this software and associated
documentation Software with or without modification are permitted provided
that the following conditions are met
1 Redistributions of source code must retain copyright statements and notices
Redistributions must also contain a copy of this document
2 Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation and
or other materials provided with the distribution
3 The name [groovy] must not be used to endorse or promote products derived
from this Software without prior written permission of <The> [Codehaus] For
written permission please contact [info] [codehaus] [org]
4 Products derived from this Software may not be called [groovy] nor may
[groovy] appear in their names without prior written permission of <The>
[Codehaus]
[groovy] is a registered trademark of <The> [Codehaus]
5 Due credit should be given to <The> [Codehaus]
[http] [groovy] [codehaus] [org]
<THIS> <SOFTWARE> <IS> <PROVIDED> <BY> <THE> [CODEHAUS] <AND> <CONTRIBUTORS>
AS IS AND ANY EXPRESSED OR IMPLIED WARRANTIES INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED IN NO EVENT SHALL <THE> [CODEHAUS] OR ITS
CONTRIBUTORS BE LIABLE FOR ANY DIRECT INDIRECT INCIDENTAL SPECIAL EXEMPLARY
OR CONSEQUENTIAL DAMAGES INCLUDING BUT NOT LIMITED TO PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES LOSS OF USE DATA OR PROFITS OR BUSINESS
INTERRUPTION HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY WHETHER IN
CONTRACT STRICT LIABILITY OR TORT INCLUDING NEGLIGENCE OR OTHERWISE ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE
""".split()
expected_itokens = u''' All Rights Reserved Redistribution and use of this
software and associated documentation Software with or without modification
are permitted provided that the following conditions are met
1 Redistributions of source code must retain copyright statements and notices
Redistributions must also contain a copy of this document
2 Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation and
or other materials provided with the distribution
3 The name must not be used to endorse or promote products derived from this
Software without prior written permission of For written permission please
contact
4 Products derived from this Software may not be called nor may appear in
their names without prior written permission of is a registered trademark of
5 Due credit should be given to
<THIS> <SOFTWARE> <IS> <PROVIDED> <BY>
AS IS AND ANY EXPRESSED OR IMPLIED WARRANTIES INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED IN NO EVENT SHALL OR ITS CONTRIBUTORS BE LIABLE FOR
ANY DIRECT INDIRECT INCIDENTAL SPECIAL EXEMPLARY OR CONSEQUENTIAL DAMAGES
INCLUDING BUT NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS
OF USE DATA OR PROFITS OR BUSINESS INTERRUPTION HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY WHETHER IN CONTRACT STRICT LIABILITY OR TORT INCLUDING
NEGLIGENCE OR OTHERWISE ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
'''.split()
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert expected_qtokens == qtext.split()
assert expected_itokens == itext.split()
assert 97 < match.coverage()
assert 97 < match.score()
expected = Span(2, 98) | Span(100, 125) | Span(127, 131) | Span(133, 139) | Span(149, 178) | Span(180, 253)
assert expected == match.qspan
assert Span(1, 135) | Span(141, 244) == match.ispan
def test_match_can_match_with_index_built_from_rule_directory_with_sun_bcls(self):
rule_dir = self.get_test_loc('detect/rule_template/rules')
idx = index.LicenseIndex(models.load_rules(rule_dir))
# at line 151 the query has an extra "Software" word inserted to avoid hash matching
query_loc = self.get_test_loc('detect/rule_template/query.txt')
matches = idx.match(location=query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 957) | Span(959, 1756) == match.qspan
assert match_seq.MATCH_SEQ == match.matcher
class TestMatchAccuracyWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def check_position(self, test_path, expected, with_span=True, print_results=False):
"""
Check license detection in file or folder against expected result.
Expected is a list of (license, lines span, qspan span) tuples.
"""
test_location = self.get_test_loc(test_path)
results = []
# FULL INDEX!!
idx = index.get_index()
matches = idx.match(test_location)
for match in matches:
for detected in match.rule.licenses:
if print_results:
print()
print(match)
print_matched_texts(match, location=test_location, idx=idx)
results.append((detected, match.lines(), with_span and match.qspan or None))
assert expected == results
def test_match_has_correct_positions_basic(self):
idx = index.get_index()
querys = u'''Licensed under the GNU General Public License (GPL).
Licensed under the GNU General Public License (GPL).
Licensed under the GNU General Public License (GPL).'''
matches = idx.match(query_string=querys)
rule = [r for r in idx.rules_by_rid if r.identifier == 'gpl_69.RULE'][0]
m1 = LicenseMatch(rule=rule, qspan=Span(0, 7), ispan=Span(0, 7), start_line=1, end_line=1)
m2 = LicenseMatch(rule=rule, qspan=Span(8, 15), ispan=Span(0, 7), start_line=2, end_line=2)
m3 = LicenseMatch(rule=rule, qspan=Span(16, 23), ispan=Span(0, 7), start_line=3, end_line=3)
assert [m1, m2, m3] == matches
def test_match_has_correct_line_positions_for_query_with_repeats(self):
expected = [
# licenses, match.lines(), qtext,
([u'apache-2.0'], (1, 2), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (3, 4), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (5, 6), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (7, 8), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (9, 10), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
]
test_path = 'positions/license1.txt'
test_location = self.get_test_loc(test_path)
idx = index.get_index()
matches = idx.match(test_location)
for i, match in enumerate(matches):
ex_lics, ex_lines, ex_qtext = expected[i]
qtext, _itext = get_texts(match, location=test_location, idx=idx)
try:
assert ex_lics == match.rule.licenses
assert ex_lines == match.lines()
assert ex_qtext == qtext
except AssertionError:
assert expected[i] == (match.rule.licenses, match.lines(), qtext)
def test_match_does_not_return_spurious_match(self):
expected = []
self.check_position('positions/license2.txt', expected)
def test_match_has_correct_line_positions_for_repeats(self):
# we had a weird error where the lines were not computed correctly
# when we had more than one file detected at a time
expected = [
# detected, match.lines(), match.qspan,
(u'apache-2.0', (1, 2), Span(0, 15)),
(u'apache-2.0', (3, 4), Span(16, 31)),
(u'apache-2.0', (5, 6), Span(32, 47)),
(u'apache-2.0', (7, 8), Span(48, 63)),
(u'apache-2.0', (9, 10), Span(64, 79)),
]
self.check_position('positions/license3.txt', expected)
def test_match_works_for_apache_rule(self):
idx = index.get_index()
querys = u'''I am not a license.
The Apache Software License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0.txt
'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 'apache-2.0_8.RULE' == match.rule.identifier
assert match_aho.MATCH_AHO_EXACT == match.matcher
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt' == qtext
assert (3, 4) == match.lines()
def test_match_does_not_detect_spurrious_short_apache_rule(self):
idx = index.get_index()
querys = u'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
<title>Apache log4j 1.2 - Continuous Integration</title>
'''
matches = idx.match(query_string=querys)
assert [] == matches
def test_match_handles_negative_rules_and_does_not_match_negative_regions_properly(self):
# note: this test relies on the negative rule: not-a-license_busybox_2.RULE
# with this text:
# "libbusybox is GPL, not LGPL, and exports no stable API that might act as a copyright barrier."
# and relies on the short rules that detect GPL and LGPL
idx = index.get_index()
# lines 3 and 4 should NOT be part of any matches
# they should match the negative "not-a-license_busybox_2.RULE"
negative_lines_not_to_match = 3, 4
querys = u'''
licensed under the LGPL license
libbusybox is GPL, not LGPL, and exports no stable API
that might act as a copyright barrier.
for the license
license: dual BSD/GPL
'''
matches = idx.match(query_string=querys)
for match in matches:
for line in negative_lines_not_to_match:
assert line not in match.lines()
def test_match_has_correct_line_positions_in_automake_perl_file(self):
# reported as https://github.com/nexB/scancode-toolkit/issues/88
expected = [
# detected, match.lines(), match.qspan,
(u'gpl-2.0-plus', (12, 25), Span(48, 159)),
(u'fsf-mit', (231, 238), Span(950, 1014)),
(u'free-unknown', (306, 307), Span(1291, 1314))
]
self.check_position('positions/automake.pl', expected)
class TestMatchBinariesWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_in_binary_lkms_1(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/ath_pci.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['bsd-new', 'gpl-2.0'] == match.rule.licenses
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license Dual BSD GPL' == qtext
assert 'license Dual BSD GPL' == itext
def test_match_in_binary_lkms_2(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/eeepc_acpi.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['gpl'] == match.rule.licenses
assert match.ispan == Span(0, 1)
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license GPL' == qtext
assert 'License GPL' == itext
def test_match_in_binary_lkms_3(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/wlan_xauth.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['bsd-new', 'gpl-2.0'] == match.rule.licenses
assert 100 == match.coverage()
assert 20 == match.score()
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license Dual BSD GPL' == qtext
assert 'license Dual BSD GPL' == itext
assert Span(0, 3) == match.ispan
@skip('Needs review')
class TestToFix(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_detection_in_complex_json(self):
# NOTE: this test cannot pass as we do not have several of the licenses
# listed in this JSON
test_file = self.get_test_loc('detect/json/all.json')
import json
item_map = json.load(open(test_file))
for item in item_map:
    itemid = item_map[item]['id']
    content = itemid + ' \n ' + item_map[item]['url'] + ' \n ' + item_map[item]['title']
tmp_file = self.get_temp_file()
fh = open(tmp_file, 'w')
fh.write(content)
fh.close()
|
apache-2.0
|
fharenheit/template-spark-app
|
src/main/python/streaming/sql_network_wordcount.py
|
76
|
3197
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Use DataFrames and SQL to count words in UTF8 encoded, '\n' delimited text received from the
network every second.
Usage: sql_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Spark Streaming would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/sql_network_wordcount.py localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row, SparkSession
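# getSparkSessionInstance lazily creates a single SparkSession per process and
# caches it in globals(), so the process() callback below can reuse one session
# across micro-batches instead of rebuilding it for every RDD.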
def getSparkSessionInstance(sparkConf):
if ('sparkSessionSingletonInstance' not in globals()):
globals()['sparkSessionSingletonInstance'] = SparkSession\
.builder\
.config(conf=sparkConf)\
.getOrCreate()
return globals()['sparkSessionSingletonInstance']
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: sql_network_wordcount.py <hostname> <port> ", file=sys.stderr)
exit(-1)
host, port = sys.argv[1:]
sc = SparkContext(appName="PythonSqlNetworkWordCount")
ssc = StreamingContext(sc, 1)
# Create a socket stream on target ip:port and count the
# words in input stream of \n delimited text (eg. generated by 'nc')
lines = ssc.socketTextStream(host, int(port))
words = lines.flatMap(lambda line: line.split(" "))
# Convert RDDs of the words DStream to DataFrame and run SQL query
def process(time, rdd):
print("========= %s =========" % str(time))
try:
# Get the singleton instance of SparkSession
spark = getSparkSessionInstance(rdd.context.getConf())
# Convert RDD[String] to RDD[Row] to DataFrame
rowRdd = rdd.map(lambda w: Row(word=w))
wordsDataFrame = spark.createDataFrame(rowRdd)
# Creates a temporary view using the DataFrame.
wordsDataFrame.createOrReplaceTempView("words")
# Do word count on table using SQL and print it
wordCountsDataFrame = \
spark.sql("select word, count(*) as total from words group by word")
wordCountsDataFrame.show()
except:
pass
words.foreachRDD(process)
ssc.start()
ssc.awaitTermination()
|
apache-2.0
|
lyft/incubator-airflow
|
airflow/hooks/jdbc_hook.py
|
5
|
1130
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.jdbc.hooks.jdbc`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.jdbc.hooks.jdbc import JdbcHook, jaydebeapi # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.jdbc.hooks.jdbc`.",
DeprecationWarning, stacklevel=2
)
|
apache-2.0
|
aduric/crossfit
|
nonrel/django/contrib/messages/storage/base.py
|
399
|
6134
|
from django.conf import settings
from django.utils.encoding import force_unicode, StrAndUnicode
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message(StrAndUnicode):
"""
Represents an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepares the message for serialization by forcing the ``message``
and ``extra_tags`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_unicode`` implementation for details).
"""
self.message = force_unicode(self.message, strings_only=True)
self.extra_tags = force_unicode(self.extra_tags, strings_only=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and \
self.message == other.message
def __unicode__(self):
return force_unicode(self.message)
def _get_tags(self):
label_tag = force_unicode(LEVEL_TAGS.get(self.level, ''),
strings_only=True)
extra_tags = force_unicode(self.extra_tags, strings_only=True)
if extra_tags and label_tag:
return u' '.join([extra_tags, label_tag])
elif extra_tags:
return extra_tags
elif label_tag:
return label_tag
return ''
tags = property(_get_tags)
class BaseStorage(object):
"""
This is the base backend for temporary message storage.
This is not a complete class; to be a usable storage backend, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super(BaseStorage, self).__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Returns a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieves a list of stored messages. Returns a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the backend was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError()
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages, returning a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError()
def _prepare_messages(self, messages):
"""
Prepares a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Stores all unread messages.
If the backend has yet to be iterated, previously stored messages will
be stored again. Otherwise, only messages added after the last
iteration will be stored.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Returns the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Sets a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
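# Illustrative sketch (not part of Django): the smallest backend satisfying the
# BaseStorage contract described above, i.e. a subclass that overrides _get and
# _store. The module-level dict used here is hypothetical and only shows which
# hooks a real (session- or cookie-based) backend must implement.
class _ExampleMemoryStorage(BaseStorage):
    _messages_by_request = {}

    def _get(self, *args, **kwargs):
        # Return (messages, all_retrieved); None for messages means "unused".
        return self._messages_by_request.get(id(self.request)), True

    def _store(self, messages, response, *args, **kwargs):
        # Persist the queued messages; return any that could not be stored.
        self._messages_by_request[id(self.request)] = messages
        return []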
|
bsd-3-clause
|
Nuevosmedios/django-badger
|
badger/validate_jsonp.py
|
7
|
6279
|
# -*- coding: utf-8 -*-
# see also: http://github.com/tav/scripts/raw/master/validate_jsonp.py
# Placed into the Public Domain by tav <tav@espians.com>
"""Validate Javascript Identifiers for use as JSON-P callback parameters."""
import re
from unicodedata import category
# ------------------------------------------------------------------------------
# javascript identifier unicode categories and "exceptional" chars
# ------------------------------------------------------------------------------
valid_jsid_categories_start = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl'
])
valid_jsid_categories = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Mn', 'Mc', 'Nd', 'Pc'
])
valid_jsid_chars = ('$', '_')
# ------------------------------------------------------------------------------
# regex to find array[index] patterns
# ------------------------------------------------------------------------------
array_index_regex = re.compile(r'\[[0-9]+\]$')
has_valid_array_index = array_index_regex.search
replace_array_index = array_index_regex.sub
# ------------------------------------------------------------------------------
# javascript reserved words -- including keywords and null/boolean literals
# ------------------------------------------------------------------------------
is_reserved_js_word = frozenset([
'abstract', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class',
'const', 'continue', 'debugger', 'default', 'delete', 'do', 'double',
'else', 'enum', 'export', 'extends', 'false', 'final', 'finally', 'float',
'for', 'function', 'goto', 'if', 'implements', 'import', 'in', 'instanceof',
'int', 'interface', 'long', 'native', 'new', 'null', 'package', 'private',
'protected', 'public', 'return', 'short', 'static', 'super', 'switch',
'synchronized', 'this', 'throw', 'throws', 'transient', 'true', 'try',
'typeof', 'var', 'void', 'volatile', 'while', 'with',
# potentially reserved in a future version of the ES5 standard
# 'let', 'yield'
]).__contains__
# ------------------------------------------------------------------------------
# the core validation functions
# ------------------------------------------------------------------------------
def is_valid_javascript_identifier(identifier, escape=r'\u', ucd_cat=category):
"""Return whether the given ``id`` is a valid Javascript identifier."""
if not identifier:
return False
if not isinstance(identifier, unicode):
try:
identifier = unicode(identifier, 'utf-8')
except UnicodeDecodeError:
return False
if escape in identifier:
new = []; add_char = new.append
split_id = identifier.split(escape)
add_char(split_id.pop(0))
for segment in split_id:
if len(segment) < 4:
return False
try:
add_char(unichr(int('0x' + segment[:4], 16)))
except Exception:
return False
add_char(segment[4:])
identifier = u''.join(new)
if is_reserved_js_word(identifier):
return False
first_char = identifier[0]
if not ((first_char in valid_jsid_chars) or
(ucd_cat(first_char) in valid_jsid_categories_start)):
return False
for char in identifier[1:]:
if not ((char in valid_jsid_chars) or
(ucd_cat(char) in valid_jsid_categories)):
return False
return True
def is_valid_jsonp_callback_value(value):
"""Return whether the given ``value`` can be used as a JSON-P callback."""
for identifier in value.split(u'.'):
while '[' in identifier:
if not has_valid_array_index(identifier):
return False
identifier = replace_array_index(u'', identifier)
if not is_valid_javascript_identifier(identifier):
return False
return True
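# Typical use (illustrative only, not part of this module): a view would
# validate the callback parameter before wrapping its JSON payload, e.g.
#
#   callback = request.GET.get('callback', '')
#   if not is_valid_jsonp_callback_value(callback):
#       return HttpResponseBadRequest('invalid JSON-P callback')
#   return HttpResponse(u'%s(%s)' % (callback, json_payload),
#                       content_type='application/javascript')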
# ------------------------------------------------------------------------------
# test
# ------------------------------------------------------------------------------
def test():
"""
The function ``is_valid_javascript_identifier`` validates a given identifier
according to the latest draft of the ECMAScript 5 Specification:
>>> is_valid_javascript_identifier('hello')
True
>>> is_valid_javascript_identifier('alert()')
False
>>> is_valid_javascript_identifier('a-b')
False
>>> is_valid_javascript_identifier('23foo')
False
>>> is_valid_javascript_identifier('foo23')
True
>>> is_valid_javascript_identifier('$210')
True
>>> is_valid_javascript_identifier(u'Stra\u00dfe')
True
>>> is_valid_javascript_identifier(r'\u0062') # u'b'
True
>>> is_valid_javascript_identifier(r'\u62')
False
>>> is_valid_javascript_identifier(r'\u0020')
False
>>> is_valid_javascript_identifier('_bar')
True
>>> is_valid_javascript_identifier('some_var')
True
>>> is_valid_javascript_identifier('$')
True
But ``is_valid_jsonp_callback_value`` is the function you want to use for
validating JSON-P callback parameter values:
>>> is_valid_jsonp_callback_value('somevar')
True
>>> is_valid_jsonp_callback_value('function')
False
>>> is_valid_jsonp_callback_value(' somevar')
False
It supports the possibility of '.' being present in the callback name, e.g.
>>> is_valid_jsonp_callback_value('$.ajaxHandler')
True
>>> is_valid_jsonp_callback_value('$.23')
False
As well as the pattern of providing an array index lookup, e.g.
>>> is_valid_jsonp_callback_value('array_of_functions[42]')
True
>>> is_valid_jsonp_callback_value('array_of_functions[42][1]')
True
>>> is_valid_jsonp_callback_value('$.ajaxHandler[42][1].foo')
True
>>> is_valid_jsonp_callback_value('array_of_functions[42]foo[1]')
False
>>> is_valid_jsonp_callback_value('array_of_functions[]')
False
>>> is_valid_jsonp_callback_value('array_of_functions["key"]')
False
Enjoy!
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
|
bsd-3-clause
|
wcevans/grpc
|
tools/run_tests/python_utils/jobset.py
|
7
|
15927
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
# cpu cost measurement
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
_MAX_RESULT_SIZE = 8192
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
sanitized = {}
for key, value in env.items():
sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
return sanitized
def platform_string():
if platform.system() == 'Windows':
return 'windows'
elif platform.system()[:7] == 'MSYS_NT':
return 'windows'
elif platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
else:
return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
pass
else:
have_alarm = False
def alarm_handler(unused_signum, unused_frame):
global have_alarm
have_alarm = False
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [ 31, 0 ],
'green': [ 32, 0 ],
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
'cyan': [ 36, 0 ]
}
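# ANSI terminal escapes: each _COLORS entry is an (SGR color code, attribute)
# pair used to build '\x1b[<attr>;<color>m' sequences in message(), while
# _BEGINNING_OF_LINE and _CLEAR_LINE move the cursor to column 0 and erase the
# line so status messages can be redrawn in place on a TTY.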
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
_CLEAR_LINE,
'\n%s' % explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1],
_COLORS[_TAG_COLOR[tag]][0],
tag,
msg,
'\n' if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
except:
pass
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ=None,
cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
timeout_retries=0, kill_handler=None, cpu_cost=1.0,
verbose_success=False):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
kill_handler: a handler that will be called whenever job.kill() is invoked
cpu_cost: number of cores per second this job needs
"""
if environ is None:
environ = {}
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
self.verbose_success = verbose_success
def identity(self):
return '%r %r' % (self.cmdline, self.environ)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
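# Example construction (all values are illustrative, not taken from this file):
# spec = JobSpec(['python', 'run_tests.py'], shortname='sanity',
#                environ={'CONFIG': 'opt'}, timeout_seconds=10 * 60, flake_retries=2)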
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
self.elapsed_time = 0
self.num_failures = 0
self.retries = 0
self.message = ''
class Job(object):
"""Manages one job."""
def __init__(self, spec, newline_on_success, travis, add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._retries = 0
self._timeout_retries = 0
self._suppress_failure_message = False
self._quiet_success = quiet_success
if not self._quiet_success:
message('START', spec.shortname, do_newline=self._travis)
self.result = JobResult()
self.start()
def GetSpec(self):
return self._spec
def start(self):
self._tempfile = tempfile.TemporaryFile()
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
env = sanitized_environment(env)
self._start = time.time()
cmdline = self._spec.cmdline
if measure_cpu_costs:
cmdline = ['time', '--portability'] + cmdline
try_start = lambda: subprocess.Popen(args=cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
delay = 0.3
for i in range(0, 4):
try:
self._process = try_start()
break
except OSError:
message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
self._process = try_start()
self._state = _RUNNING
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
self._tempfile.seek(0)
stdout = self._tempfile.read()
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message('FLAKE', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout(), do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message('FAILED', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout(), do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(r'real ([0-9.]+)\nuser ([0-9.]+)\nsys ([0-9.]+)', stdout())
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
if not self._quiet_success:
message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
elif (self._state == _RUNNING and
self._spec.timeout_seconds is not None and
time.time() - self._start > self._spec.timeout_seconds):
if self._timeout_retries < self._spec.timeout_retries:
message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
self.start()
else:
message('TIMEOUT', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
stop_on_failure, add_env, quiet_success):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._newline_on_success = newline_on_success
self._travis = travis
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
self.resultset = {}
self._remaining = None
self._start_time = time.time()
def set_remaining(self, remaining):
self._remaining = remaining
def get_num_failures(self):
return self._failures
def cpu_cost(self):
c = 0
for job in self._running:
c += job._spec.cpu_cost
return c
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
if self.cancelled(): return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0: break
if current_cpu_cost + spec.cpu_cost <= self._maxjobs: break
self.reap()
if self.cancelled(): return False
job = Job(spec,
self._newline_on_success,
self._travis,
self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
def reap(self):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = job.state()
if st == _RUNNING: continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
if not self._quiet_success or job.result.state != 'PASSED':
self.resultset[job.GetSpec().shortname].append(job.result)
self._running.remove(job)
if dead: return
if not self._travis and platform_string() != 'windows':
rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (self._remaining + len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
message('WAITING', '%s%d jobs running, %d complete, %d failed' % (
rstr, len(self._running), self._completed, self._failures))
if platform_string() == 'windows':
time.sleep(0.1)
else:
global have_alarm
if not have_alarm:
have_alarm = True
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
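# Illustrative behaviour on a small input (assumed, not part of the original tests):
# list(tag_remaining(['a', 'b', 'c'])) == [('a', 2), ('b', 1), ('c', 0)],
# where the second element is the number of jobs still queued behind each one.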
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
newline_on_success=False,
travis=False,
infinite_runs=False,
stop_on_failure=False,
add_env={},
skip_jobs=False,
quiet_success=False):
if skip_jobs:
resultset = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
resultset[job.shortname] = [skipped_job_result]
return 0, resultset
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success)
for cmdline, remaining in tag_remaining(cmdlines):
if not js.start(cmdline):
break
if remaining is not None:
js.set_remaining(remaining)
js.finish()
return js.get_num_failures(), js.resultset
|
bsd-3-clause
|
gbeauchesne/libva-intel-hybrid-driver
|
src/shaders/gpp.py
|
11
|
5313
|
#!/usr/bin/env python
#coding=UTF-8
# Copyright © 2011 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Chen, Yangyang <yangyang.chen@intel.com>
# Han, Haofu <haofu.han@intel.com>
#
import sys
class Block:
def __init__(self, ln=0, s=None):
assert type(ln) == int
assert type(s) == str or s == None
self.lineno = ln
self.text = s
self.subblocks = []
def append(self, block):
self.subblocks.append(block)
def checkfor(self, line):
import re
p = r'\$\s*for\s*'
if re.match(p, line) == None:
raise Exception(self.__errmsg('syntax error'))
tail = line.split('(', 1)[1].rsplit(')', 1)
conds = tail[0].split(';')
lb = tail[1]
if lb.strip() != '{':
raise Exception(self.__errmsg('missing "{"'))
if len(conds) != 3:
raise Exception(self.__errmsg('syntax error (missing ";"?)'))
init = conds[0]
cond = conds[1]
step = conds[2]
self.__parse_init(init)
self.__parse_cond(cond)
self.__parse_step(step)
def __parse_init(self, init):
inits = init.split(',')
self.param_init = []
for ini in inits:
try:
val = eval(ini)
self.param_init.append(val)
except:
raise Exception(self.__errmsg('not an expression: %s'%ini))
self.param_num = len(inits)
def __parse_cond(self, cond):
cond = cond.strip()
if cond[0] in ['<', '>']:
if cond[1] == '=':
self.param_op = cond[:2]
limit = cond[2:]
else:
self.param_op = cond[0]
limit = cond[1:]
try:
self.param_limit = eval(limit)
except:
raise Exception(self.__errmsg('not an expression: %s'%limit))
else:
raise Exception(self.__errmsg('syntax error'))
def __parse_step(self, step):
steps = step.split(',')
if len(steps) != self.param_num:
raise Exception(self.__errmsg('params number does not match'))
self.param_step = []
for st in steps:
try:
val = eval(st)
self.param_step.append(val)
except:
raise Exception(self.__errmsg('not an expression: %s'%st))
def __errmsg(self, msg=''):
return '%d: %s' % (self.lineno, msg)
def readlines(f):
lines = f.readlines()
buf = []
for line in lines:
if '\\n' in line:
tmp = line.split('\\n')
buf.extend(tmp)
else:
buf.append(line)
return buf
def parselines(lines):
root = Block(0)
stack = [root]
lineno = 0
for line in lines:
lineno += 1
line = line.strip()
if line.startswith('$'):
block = Block(lineno)
block.checkfor(line)
stack[-1].append(block)
stack.append(block)
elif line.startswith('}'):
stack.pop()
elif line and not line.startswith('#'):
stack[-1].append(Block(lineno, line))
return root
def writeblocks(outfile, blocks):
buf = []
def check_cond(op, cur, lim):
assert op in ['<', '>', '<=', '>=']
assert type(cur) == int
assert type(lim) == int
return eval('%d %s %d' % (cur, op, lim))
def do_writeblock(block, curs):
if block.text != None:
import re
p = r'\%(\d+)'
newline = block.text
params = set(re.findall(p, block.text))
for param in params:
index = int(param) - 1
if index >= len(curs):
raise Exception('%d: too many params (%%%d)'%(block.lineno, index+1))
newline = newline.replace('%%%d'%(index+1), str(curs[index]))
if newline and \
not newline.startswith('.') and \
not newline.endswith(':') and \
not newline.endswith(';'):
newline += ';'
buf.append(newline)
else:
for_curs = block.param_init
while check_cond(block.param_op, for_curs[0], block.param_limit):
for sblock in block.subblocks:
do_writeblock(sblock, for_curs)
for i in range(0, block.param_num):
for_curs[i] += block.param_step[i]
for block in blocks.subblocks:
do_writeblock(block, [])
outfile.write('\n'.join(buf))
outfile.write('\n')
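# Illustrative expansion (assumed input, not part of the original sources):
# the template lines '$for(0; <2; 1) {', 'add (8) r%1 r0' and '}' are written
# out as 'add (8) r0 r0;' and 'add (8) r1 r0;', with ';' appended automatically.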
if __name__ == '__main__':
argc = len(sys.argv)
if argc == 1:
print >>sys.stderr, 'no input file'
sys.exit(0)
try:
infile = open(sys.argv[1], 'r')
except IOError:
print >>sys.stderr, 'can not open %s' % sys.argv[1]
sys.exit(1)
if argc == 2:
outfile = sys.stdout
else:
try:
outfile = open(sys.argv[2], 'w')
except IOError:
print >>sys.stderr, 'can not write to %s' % sys.argv[2]
sys.exit(1)
lines = readlines(infile)
try:
infile.close()
except IOError:
pass
blocks = parselines(lines)
writeblocks(outfile, blocks)
|
mit
|
openpermissions/repository-srv
|
tests/unit/controllers/test_assets_handler.py
|
1
|
5620
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import pytest
from mock import MagicMock, patch
from koi import exceptions
from koi.test_helpers import gen_test, make_future
from repository.controllers.assets_handler import _validate_body, AssetsHandler
TEST_NAMESPACE = 'c8ab01'
TOKEN = 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnQiOnsic2VydmljZV90eXBlIjoiaW5kZXgiLCJvcmdhbmlzYXRpb25faWQiOiJ0Z' \
'XN0Y28iLCJpZCI6IjQyMjVmNDc3NGQ2ODc0YTY4NTY1YTA0MTMwMDAxMTQ0In0sImRlbGVnYXRlIjpmYWxzZSwiYXVkIjoibG9jYWxob3N0Ojg' \
'wMDcvYXV0aG9yaXplIiwiZXhwIjoxNDU2OTM5NDk0LCJpc3MiOiJsb2NhbGhvc3Q6ODAwNy90b2tlbiIsInNjb3BlIjoicmVhZCIsImdyYW50X' \
'3R5cGUiOiJjbGllbnRfY3JlZGVudGlhbHMiLCJzdWIiOiI0MjI1ZjQ3NzRkNjg3NGE2ODU2NWEwNDEzMDAwMTE0NCJ9.J4gFHMU-v_1f5xgWjd' \
'42JaZhHpYfaccPtvq5uZMox3jvcs2A7q1exI3YIB75x589wp6QRpChr5C-If4bR71vpZ09cSMoX4UKR5WOaMDAMeMh2QPEHYUCE1VWEyrr_o1i' \
'ljSk-bNfo8Mpufl67NL0J7rU7ZJ-o3ZwgoPIDTA1x1utcrvlLKTlWkmYGqEEBXxuL0V_vOGHW6UohXAA87jdMlgQRNTaZo75ETqbKp4sPIuiXz' \
'OoidEPjbvZpo7LkAfAea9Js-B6muWWaI_i2FO2K3c6XJvxZAiyufL-nE-fx1vSJQeOixEr6zbnOF_s7byETxHKlCwOrxpx0wqPrE0ttw'
class PartialMockedHandler(AssetsHandler):
def __init__(self, content_type=None):
super(PartialMockedHandler, self).__init__(application=MagicMock(),
request=MagicMock())
self.finish = MagicMock()
self.token = {'sub': 'client1', 'client': {'id': 'testco'}}
self.request.headers = {}
if content_type:
self.request.headers['Content-Type'] = content_type
def test__validate_body():
request = MagicMock()
request.body = 'test'
_validate_body(request)
def test__validate_body_no_body():
request = MagicMock()
request.body = None
with pytest.raises(exceptions.HTTPError) as exc:
_validate_body(request)
assert exc.value.status_code == 400
assert exc.value.errors == 'No Data in Body'
def test__validate_body_empty_body():
request = MagicMock()
request.body = ''
with pytest.raises(exceptions.HTTPError) as exc:
_validate_body(request)
assert exc.value.status_code == 400
assert exc.value.errors == 'Body is empty string'
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@patch('repository.controllers.assets_handler.audit')
@patch('repository.controllers.assets_handler.asset')
@gen_test
def test_repository_assets_handler_post(assets, audit, helper, _validate_body):
helper.validate.return_value = None
assets.store.return_value = make_future('asset data')
audit.log_added_assets.return_value = make_future(None)
handler = PartialMockedHandler()
yield handler.post(TEST_NAMESPACE)
assert assets.store.call_count == 1
audit.log_added_assets.assert_called_once_with(
'asset data',
{'sub': 'client1', 'client': {'id': 'testco'}},
repository_id='c8ab01')
handler.finish.assert_called_once_with({"status": 200})
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@patch('repository.controllers.assets_handler.asset')
def test_repository_assets_handler_post_error(assets, helper, _validate_body):
helper.validate.return_value = None
def mock_store(body, namespace, content_type):
raise exceptions.HTTPError(400, 'errormsg')
assets.store.side_effect = mock_store
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 400
@gen_test
def test_repository_assets_handler_post_invalid_content_type():
def mock_validate(_):
raise exceptions.HTTPError(415, 'errormsg')
handler = PartialMockedHandler(content_type='application/json')
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 415
@patch('repository.controllers.assets_handler._validate_body')
@gen_test
def test_repository_assets_handler_post_invalid_body(_validate_body):
def mock_validate(_):
raise exceptions.HTTPError(400, 'errormsg')
_validate_body.side_effect = mock_validate
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 400
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@gen_test
def test_repository_assets_handler_post_invalid_body_xml(helper, _validate_body):
def mock_validate(data, format=None):
raise exceptions.HTTPError(400, 'errormsg')
helper.validate.side_effect = mock_validate
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post('repository1')
assert exc.value.status_code == 400
|
apache-2.0
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_portforward.py
|
14
|
12322
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of the virtual machine for which the port forwarding rule is made.
- Required if C(state=present).
state:
description:
- State of the port forwarding rule.
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the port forwarding rule.
default: tcp
choices: [ tcp, udp ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified, equal to C(public_port).
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified, equal to C(private_port).
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
default: false
type: bool
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
default: false
network:
description:
- Name of the network.
version_added: "2.3"
vpc:
description:
- Name of the VPC.
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
account:
description:
- Account the C(vm) is related to.
project:
description:
- Name of the project the C(vm) is located in.
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
poll_async:
description:
- Poll async jobs until job has finished.
default: true
type: bool
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
aliases: [ tag ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: 1.2.3.4:80 -> web01:8080
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
- name: forward SSH and open firewall
local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
- name: forward DNS traffic, but do not open firewall
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
- name: remove ssh port forwarding
local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: str
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: str
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: str
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: str
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: str
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: str
sample: my_vpc
network:
description: Name of the network.
returned: success
type: str
sample: dmz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
args = {
'ipaddressid': self.get_ip_address(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
portforwarding_rules = self.query_api('listPortForwardingRules', **args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'openfirewall': self.module.params.get('open_firewall'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'networkid': self.get_network(key='id'),
}
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1?, workaround using remove/create instead of update
# portforwarding_rule = self.query_api('updatePortForwardingRule', **args)
self.absent_portforwarding_rule()
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {
'id': portforwarding_rule['id'],
}
if not self.module.check_mode:
res = self.query_api('deletePortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=True),
protocol=dict(choices=['tcp', 'udp'], default='tcp'),
public_port=dict(type='int', required=True),
public_end_port=dict(type='int'),
private_port=dict(type='int', required=True),
private_end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
open_firewall=dict(type='bool', default=False),
vm_guest_ip=dict(),
vm=dict(),
vpc=dict(),
network=dict(),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
SteveXiSong/ECE757-SnoopingPredictions
|
src/mem/cache/prefetch/Prefetcher.py
|
6
|
3820
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class BasePrefetcher(ClockedObject):
type = 'BasePrefetcher'
abstract = True
cxx_header = "mem/cache/prefetch/base.hh"
size = Param.Int(100,
"Number of entries in the hardware prefetch queue")
cross_pages = Param.Bool(False,
"Allow prefetches to cross virtual page boundaries")
serial_squash = Param.Bool(False,
"Squash prefetches with a later time on a subsequent miss")
degree = Param.Int(1,
"Degree of the prefetch depth")
latency = Param.Cycles('1', "Latency of the prefetcher")
use_master_id = Param.Bool(True,
"Use the master id to separate calculations of prefetches")
data_accesses_only = Param.Bool(False,
"Only prefetch on data not on instruction accesses")
on_miss_only = Param.Bool(False,
"Only prefetch on miss (as opposed to always)")
on_read_only = Param.Bool(False,
"Only prefetch on read requests (write requests ignored)")
on_prefetch = Param.Bool(True,
"Let lower cache prefetcher train on prefetch requests")
inst_tagged = Param.Bool(True,
"Perform a tagged prefetch for instruction fetches always")
sys = Param.System(Parent.any, "System this device belongs to")
class StridePrefetcher(BasePrefetcher):
type = 'StridePrefetcher'
cxx_class = 'StridePrefetcher'
cxx_header = "mem/cache/prefetch/stride.hh"
class TaggedPrefetcher(BasePrefetcher):
type = 'TaggedPrefetcher'
cxx_class = 'TaggedPrefetcher'
cxx_header = "mem/cache/prefetch/tagged.hh"
|
bsd-3-clause
|
igemsoftware/SYSU-Software2013
|
project/Python27/Lib/site-packages/win32comext/axscript/client/pydumper.py
|
35
|
2124
|
# pydumper.py
#
# This is being worked on - it does not yet work at all, in any way
# shape or form :-)
#
# A new script engine, derived from the standard scripting engine,
# which dumps information.
# This generally can be used to grab all sorts of useful details about
# an engine - expose bugs in it or Python, dump the object model, etc.
# As it is derived from the standard engine, it fully supports Python
# as a scripting language - meaning the dumps produced can be quite dynamic,
# and based on the script code you execute.
import pyscript
from win32com.axscript import axscript
from pyscript import RaiseAssert, trace, Exception, SCRIPTTEXT_FORCEEXECUTION
PyDump_CLSID = '{ac527e60-c693-11d0-9c25-00aa00125a98}'
class AXScriptAttribute(pyscript.AXScriptAttribute):
pass
class NamedScriptAttribute(pyscript.NamedScriptAttribute):
pass
class PyScript(pyscript.PyScript):
pass
def Register():
import sys
if '-d' in sys.argv:
dispatcher = "DispatcherWin32trace"
debug_desc = " ("+dispatcher+")"
debug_option = "Yes"
else:
dispatcher = None
debug_desc = ""
debug_option = ""
categories = [axscript.CATID_ActiveScript,axscript.CATID_ActiveScriptParse]
clsid = PyDump_CLSID
lcid = 0x0409 # // english
policy = None # "win32com.axscript.client.axspolicy.AXScriptPolicy"
print "Registering COM server%s..." % debug_desc
from win32com.server.register import RegisterServer
languageName = "PyDump"
verProgId = "Python.Dumper.1"
RegisterServer(clsid = clsid, pythonInstString = "win32com.axscript.client.pyscript.PyDumper",
className = "Python Debugging/Dumping ActiveX Scripting Engine",
progID = languageName, verProgID = verProgId,
catids = categories,
policy=policy, dispatcher = dispatcher)
CreateRegKey(languageName + "\\OLEScript")
# Basic Registration for wsh.
win32com.server.register._set_string(".pysDump", "pysDumpFile")
win32com.server.register._set_string("pysDumpFile\\ScriptEngine", languageName)
print "Dumping Server registered."
if __name__=='__main__':
Register()
|
mit
|
Mirantis/swift-encrypt
|
swift/common/direct_client.py
|
3
|
18242
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import socket
from httplib import HTTPException
from time import time
from urllib import quote as _quote
from eventlet import sleep, Timeout
from swift.common.bufferedhttp import http_connect
from swiftclient import ClientException, json_loads
from swift.common.utils import normalize_timestamp
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
def quote(value, safe='/'):
if isinstance(value, unicode):
value = value.encode('utf8')
return _quote(value, safe)
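# Illustrative result (not from the original module): quote(u'caf\u00e9 bar/baz')
# returns 'caf%C3%A9%20bar/baz'; unicode is utf-8 encoded first and '/' stays
# unescaped because it is in the default safe set.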
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of containers) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/' + account
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Account server %s:%s direct GET %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of objects) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/%s/%s' % (account, container)
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Container server %s:%s direct GET %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers={}):
path = '/%s/%s' % (account, container)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, resp_chunk_size=None, headers={}):
"""
Get object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, headers=headers)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Object server %s:%s direct GET %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers, object_body
def direct_put_object(node, part, account, container, name, contents,
content_length=None, etag=None, content_type=None,
headers=None, conn_timeout=5, response_timeout=15,
resp_chunk_size=None):
"""
Put object directly to the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
"""
# TODO: Add chunked puts
path = '/%s/%s/%s' % (account, container, name)
if headers is None:
headers = {}
if etag:
headers['ETag'] = etag.strip('"')
if content_length is not None:
headers['Content-Length'] = str(content_length)
if content_type is not None:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
if not contents:
headers['Content-Length'] = '0'
if isinstance(contents, basestring):
contents = [contents]
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'PUT', path, headers=headers)
for chunk in contents:
conn.send(chunk)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct PUT %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
return resp.getheader('etag').strip('"')
def direct_post_object(node, part, account, container, name, headers,
conn_timeout=5, response_timeout=15):
"""
Direct update to object metadata on object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
"""
path = '/%s/%s/%s' % (account, container, name)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'POST', path, headers=headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct POST %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_delete_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15, headers={}):
"""
Delete object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: response from server
"""
path = '/%s/%s/%s' % (account, container, obj)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def retry(func, *args, **kwargs):
"""
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
:param kwargs: keyword arguments to send to func (if retries or
error_log are sent, they will be deleted from kwargs
before sending on to func)
:returns: result of func
"""
retries = 5
if 'retries' in kwargs:
retries = kwargs['retries']
del kwargs['retries']
error_log = None
if 'error_log' in kwargs:
error_log = kwargs['error_log']
del kwargs['error_log']
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
return attempts, func(*args, **kwargs)
except (socket.error, HTTPException, Timeout), err:
if error_log:
error_log(err)
if attempts > retries:
raise
except ClientException, err:
if error_log:
error_log(err)
if attempts > retries or not is_server_error(err.http_status) or \
err.http_status == HTTP_INSUFFICIENT_STORAGE:
raise
sleep(backoff)
backoff *= 2
# Shouldn't actually get down here, but just in case.
if args and 'ip' in args[0]:
raise ClientException('Raise too many retries',
http_host=args[
0]['ip'], http_port=args[0]['port'],
http_device=args[0]['device'])
else:
raise ClientException('Raise too many retries')
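# Illustrative call (the node values below are assumed, not taken from a real ring):
# node = {'ip': '127.0.0.1', 'port': 6002, 'device': 'sda1'}
# attempts, (headers, containers) = retry(direct_get_account, node, 0, 'AUTH_test', retries=3)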
|
apache-2.0
|
bregman-arie/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_deployment.py
|
15
|
27035
|
#!/usr/bin/python
#
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_deployment
short_description: Create or destroy Azure Resource Manager template deployments
version_added: "2.1"
description:
- "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
For more information on Azure resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
options:
resource_group_name:
description:
- The resource group name to use or create to host the deployed template
required: true
location:
description:
- The geo-locations in which the resource group will be located.
default: westus
deployment_mode:
description:
- In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
default: incremental
choices:
- complete
- incremental
state:
description:
- If state is "present", template will be created. If state is "present" and if deployment exists, it will be
updated. If state is "absent", stack will be removed.
default: present
choices:
- present
- absent
template:
description:
- A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
Either one of them is required if "state" parameter is "present".
template_link:
description:
- Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
of them is required if "state" parameter is "present".
parameters:
description:
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
with 'parameters_link'. Either one of them is required if "state" parameter is "present".
parameters_link:
description:
- Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
one of them is required if "state" parameter is "present".
deployment_name:
description:
- The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
will overwrite the previous value in the resource group's deployment history.
default: ansible-arm
wait_for_deployment_completion:
description:
- Whether or not to block until the deployment has completed.
type: bool
default: 'yes'
wait_for_deployment_polling_period:
description:
- Time (in seconds) to wait between polls when waiting for deployment completion.
default: 10
extends_documentation_fragment:
- azure
author:
- David Justice (@devigned)
- Laurent Mazuel (@lmazuel)
- Andre Price (@obsoleted)
'''
EXAMPLES = '''
# Destroy a template deployment
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
# Create or update a template deployment based on uris using parameter and template links
- name: Create Azure Deploy
azure_rm_deployment:
state: present
resource_group_name: dev-ops-cle
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
# Create or update a template deployment based on a uri to the template and parameters specified inline.
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
---
- hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
parameters:
newStorageAccountName:
value: devopsclestorage1
adminUsername:
value: devopscle
dnsNameForPublicIP:
value: devopscleazure
location:
value: West US
vmSize:
value: Standard_A2
vmName:
value: ansibleSshVm
sshKeyData:
value: YOUR_SSH_PUBLIC_KEY
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
register: azure
- name: Add new instance to host group
add_host:
hostname: "{{ item['ips'][0].public_ip }}"
groupname: azure_vms
with_items: "{{ azure.deployment.instances }}"
- hosts: azure_vms
user: devopscle
tasks:
- name: Wait for SSH to come up
wait_for:
port: 22
timeout: 2000
state: started
- name: echo the hostname of the vm
shell: hostname
# Deploy an Azure WebApp running a hello world'ish node app
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
azure_rm_deployment:
state: present
subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
resource_group_name: dev-ops-cle-webapp
parameters:
repoURL:
value: 'https://github.com/devigned/az-roadshow-oss.git'
siteName:
value: devopscleweb
hostingPlanName:
value: someplan
siteLocation:
value: westus
sku:
value: Standard
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
# Create or update a template deployment based on an inline template and parameters
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
template:
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
contentVersion: "1.0.0.0"
parameters:
newStorageAccountName:
type: "string"
metadata:
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
adminUsername:
type: "string"
metadata:
description: "User name for the Virtual Machine."
adminPassword:
type: "securestring"
metadata:
description: "Password for the Virtual Machine."
dnsNameForPublicIP:
type: "string"
metadata:
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
ubuntuOSVersion:
type: "string"
defaultValue: "14.04.2-LTS"
allowedValues:
- "12.04.5-LTS"
- "14.04.2-LTS"
- "15.04"
metadata:
description: >
The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
variables:
location: "West US"
imagePublisher: "Canonical"
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
          addressPrefix: "10.0.0.0/16"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"
publicIPAddressName: "myPublicIP"
publicIPAddressType: "Dynamic"
vmStorageAccountContainerName: "vhds"
vmName: "MyUbuntuVM"
vmSize: "Standard_D1"
virtualNetworkName: "MyVNET"
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
resources:
- type: "Microsoft.Storage/storageAccounts"
name: "[parameters('newStorageAccountName')]"
apiVersion: "2015-05-01-preview"
location: "[variables('location')]"
properties:
accountType: "[variables('storageAccountType')]"
- apiVersion: "2015-05-01-preview"
type: "Microsoft.Network/publicIPAddresses"
name: "[variables('publicIPAddressName')]"
location: "[variables('location')]"
properties:
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
dnsSettings:
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
- type: "Microsoft.Network/virtualNetworks"
apiVersion: "2015-05-01-preview"
name: "[variables('virtualNetworkName')]"
location: "[variables('location')]"
properties:
addressSpace:
addressPrefixes:
- "[variables('addressPrefix')]"
subnets:
-
name: "[variables('subnetName')]"
properties:
addressPrefix: "[variables('subnetPrefix')]"
- type: "Microsoft.Network/networkInterfaces"
apiVersion: "2015-05-01-preview"
name: "[variables('nicName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
properties:
ipConfigurations:
-
name: "ipconfig1"
properties:
privateIPAllocationMethod: "Dynamic"
publicIPAddress:
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
subnet:
id: "[variables('subnetRef')]"
- type: "Microsoft.Compute/virtualMachines"
apiVersion: "2015-06-15"
name: "[variables('vmName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
properties:
hardwareProfile:
vmSize: "[variables('vmSize')]"
osProfile:
computername: "[variables('vmName')]"
adminUsername: "[parameters('adminUsername')]"
adminPassword: "[parameters('adminPassword')]"
storageProfile:
imageReference:
publisher: "[variables('imagePublisher')]"
offer: "[variables('imageOffer')]"
sku: "[parameters('ubuntuOSVersion')]"
version: "latest"
osDisk:
name: "osdisk"
vhd:
uri: >
[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
variables('OSDiskName'),'.vhd')]
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
networkInterfaces:
-
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
diagnosticsProfile:
bootDiagnostics:
enabled: "true"
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
parameters:
newStorageAccountName:
value: devopsclestorage
adminUsername:
value: devopscle
adminPassword:
value: Password1!
dnsNameForPublicIP:
value: devopscleazure
'''
RETURN = '''
deployment:
description: Deployment details
type: dict
returned: always
sample:
group_name:
description: Name of the resource group
type: string
returned: always
id:
description: The Azure ID of the deployment
type: string
returned: always
instances:
description: Provides the public IP addresses for each VM instance.
type: list
returned: always
name:
description: Name of the deployment
type: string
returned: always
outputs:
description: Dictionary of outputs received from the deployment
type: dict
returned: always
'''
import time
try:
from azure.common.credentials import ServicePrincipalCredentials
import yaml
except ImportError as exc:
IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
try:
from itertools import chain
from azure.common.exceptions import CloudError
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMDeploymentManager(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
state=dict(type='str', default='present', choices=['present', 'absent']),
template=dict(type='dict', default=None),
parameters=dict(type='dict', default=None),
template_link=dict(type='str', default=None),
parameters_link=dict(type='str', default=None),
location=dict(type='str', default="westus"),
deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']),
deployment_name=dict(type='str', default="ansible-arm"),
wait_for_deployment_completion=dict(type='bool', default=True),
wait_for_deployment_polling_period=dict(type='int', default=10)
)
mutually_exclusive = [('template', 'template_link'),
('parameters', 'parameters_link')]
self.resource_group_name = None
self.state = None
self.template = None
self.parameters = None
self.template_link = None
self.parameters_link = None
self.location = None
self.deployment_mode = None
self.deployment_name = None
self.wait_for_deployment_completion = None
self.wait_for_deployment_polling_period = None
self.tags = None
self.results = dict(
deployment=dict(),
changed=False,
msg=""
)
super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
if self.state == 'present':
deployment = self.deploy_template()
if deployment is None:
self.results['deployment'] = dict(
name=self.deployment_name,
group_name=self.resource_group_name,
id=None,
outputs=None,
instances=None
)
else:
self.results['deployment'] = dict(
name=deployment.name,
group_name=self.resource_group_name,
id=deployment.id,
outputs=deployment.properties.outputs,
instances=self._get_instances(deployment)
)
self.results['changed'] = True
self.results['msg'] = 'deployment succeeded'
else:
if self.resource_group_exists(self.resource_group_name):
self.destroy_resource_group()
self.results['changed'] = True
self.results['msg'] = "deployment deleted"
return self.results
def deploy_template(self):
"""
        Deploy the template and parameters configured on this module instance
        (template/template_link and parameters/parameters_link), creating the
        resource group first if it does not already exist.
        :return: the deployment result object, or None when
            wait_for_deployment_completion is False
"""
deploy_parameter = self.rm_models.DeploymentProperties(self.deployment_mode)
if not self.parameters_link:
deploy_parameter.parameters = self.parameters
else:
deploy_parameter.parameters_link = self.rm_models.ParametersLink(
uri=self.parameters_link
)
if not self.template_link:
deploy_parameter.template = self.template
else:
deploy_parameter.template_link = self.rm_models.TemplateLink(
uri=self.template_link
)
params = self.rm_models.ResourceGroup(location=self.location, tags=self.tags)
try:
self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
except CloudError as exc:
self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
result = self.rm_client.deployments.create_or_update(self.resource_group_name,
self.deployment_name,
deploy_parameter)
deployment_result = None
if self.wait_for_deployment_completion:
deployment_result = self.get_poller_result(result)
while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
'Succeeded']:
time.sleep(self.wait_for_deployment_polling_period)
deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
except CloudError as exc:
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
failed_deployment_operations=failed_deployment_operations)
if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
failed_deployment_operations=failed_deployment_operations)
return deployment_result
def destroy_resource_group(self):
"""
Destroy the targeted resource group
"""
try:
result = self.rm_client.resource_groups.delete(self.resource_group_name)
result.wait() # Blocking wait till the delete is finished
except CloudError as e:
if e.status_code == 404 or e.status_code == 204:
return
else:
self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
(e.status_code, e.message))
def resource_group_exists(self, resource_group):
'''
Return True/False based on existence of requested resource group.
:param resource_group: string. Name of a resource group.
:return: boolean
'''
try:
self.rm_client.resource_groups.get(resource_group)
except CloudError:
return False
return True
def _get_failed_nested_operations(self, current_operations):
new_operations = []
for operation in current_operations:
if operation.properties.provisioning_state == 'Failed':
new_operations.append(operation)
if operation.properties.target_resource and \
'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
nested_deployment = operation.properties.target_resource.resource_name
try:
nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
def _get_failed_deployment_operations(self, deployment_name):
results = []
# time.sleep(15) # there is a race condition between when we ask for deployment status and when the
# # status is available.
try:
operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
except CloudError as exc:
self.fail("Get deployment failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
results = [
dict(
id=op.id,
operation_id=op.operation_id,
status_code=op.properties.status_code,
status_message=op.properties.status_message,
target_resource=dict(
id=op.properties.target_resource.id,
resource_name=op.properties.target_resource.resource_name,
resource_type=op.properties.target_resource.resource_type
) if op.properties.target_resource else None,
provisioning_state=op.properties.provisioning_state,
)
for op in self._get_failed_nested_operations(operations)
]
        except Exception:
            # Swallow errors here so the original deployment failure is reported instead of a secondary error raised while formatting the operations
pass
self.log(dict(failed_deployment_operations=results), pretty_print=True)
return results
def _get_instances(self, deployment):
dep_tree = self._build_hierarchy(deployment.properties.dependencies)
vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
for vm in vms]
vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
for vm, nics in vms_and_nics]
return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
def _get_dependencies(self, dep_tree, resource_type):
matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
for child_tree in [value['children'] for value in dep_tree.values()]:
matches += self._get_dependencies(child_tree, resource_type)
return matches
def _build_hierarchy(self, dependencies, tree=None):
tree = dict(top=True) if tree is None else tree
for dep in dependencies:
if dep.resource_name not in tree:
tree[dep.resource_name] = dict(dep=dep, children=dict())
if isinstance(dep, self.rm_models.Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
if 'top' in tree:
tree.pop('top', None)
keys = list(tree.keys())
for key1 in keys:
for key2 in keys:
if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
tree[key2]['children'][key1] = tree[key1]
tree.pop(key1)
return tree
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label': ip.dns_settings.domain_name_label,
'fqdn': ip.dns_settings.fqdn
}
return ip_dict
def _nic_to_public_ips_instance(self, nics):
return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
for nic_obj in (self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics)
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
def main():
AzureRMDeploymentManager()
if __name__ == '__main__':
main()
|
gpl-3.0
|
sh4wn/vispy
|
vispy/geometry/tests/test_generation.py
|
4
|
1541
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.testing import run_tests_if_main
from vispy.geometry import (create_box, create_cube, create_cylinder,
create_sphere, create_plane)
def test_box():
"""Test box function"""
vertices, filled, outline = create_box()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
def test_cube():
"""Test cube function"""
vertices, filled, outline = create_cube()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
def test_sphere():
"""Test sphere function"""
md = create_sphere(10, 20, radius=10)
radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
def test_cylinder():
"""Test cylinder function"""
md = create_cylinder(10, 20, radius=[10, 10])
radii = np.sqrt((md.get_vertices()[:, :2] ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
def test_plane():
"""Test plane function"""
vertices, filled, outline = create_plane()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
run_tests_if_main()
|
bsd-3-clause
|
tdliu/hoop-picks
|
lib/requests/packages/urllib3/poolmanager.py
|
66
|
13052
|
from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version', 'ca_cert_dir', 'ssl_context')
# The base fields to use when determining what pool to get a connection from;
# these do not rely on the ``connection_pool_kw`` and can be determined by the
# URL and potentially the ``urllib3.connection.port_by_scheme`` dictionary.
#
# All custom key schemes should include the fields in this key at a minimum.
BasePoolKey = collections.namedtuple('BasePoolKey', ('scheme', 'host', 'port'))
# The fields to use when determining what pool to get a HTTP and HTTPS
# connection from. All additional fields must be present in the PoolManager's
# ``connection_pool_kw`` instance variable.
HTTPPoolKey = collections.namedtuple(
'HTTPPoolKey', BasePoolKey._fields + ('timeout', 'retries', 'strict',
'block', 'source_address')
)
HTTPSPoolKey = collections.namedtuple(
'HTTPSPoolKey', HTTPPoolKey._fields + SSL_KEYWORDS
)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key of type ``key_class`` for a request.
According to RFC 3986, both the scheme and host are case-insensitive.
    Therefore, this function normalizes both before constructing the pool
    key for a request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:param request_context:
        A dictionary-like object that contains the context for a request.
        It should contain a key for each field in the :class:`HTTPPoolKey`.
"""
context = {}
for key in key_class._fields:
context[key] = request_context.get(key)
context['scheme'] = context['scheme'].lower()
context['host'] = context['host'].lower()
return key_class(**context)
# A dictionary that maps a scheme to a callable that creates a pool key.
# This can be used to alter the way pool keys are constructed, if desired.
# Each PoolManager makes a copy of this dictionary so they can be configured
# globally here, or individually on the instance.
key_fn_by_scheme = {
'http': functools.partial(_default_key_normalizer, HTTPPoolKey),
'https': functools.partial(_default_key_normalizer, HTTPSPoolKey),
}
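# Hedged usage sketch (not part of the original urllib3 source): because each
# PoolManager copies key_fn_by_scheme in its __init__, a single instance can
# swap in its own key constructor, e.g. to pool plain HTTP connections by
# scheme/host/port only. Shown as doctest-style comments so importing this
# module stays side-effect free; the override below is an illustrative
# assumption, not a recommended configuration.
#
#   >>> manager = PoolManager()
#   >>> manager.key_fn_by_scheme['http'] = functools.partial(
#   ...     _default_key_normalizer, BasePoolKey)
#   >>> # HTTP pool keys now ignore timeout/retries/strict/block/source_address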
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self.connection_pool_kw.copy()
request_context['scheme'] = scheme or 'http'
if not port:
port = port_by_scheme.get(request_context['scheme'].lower(), 80)
request_context['port'] = port
request_context['host'] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context['scheme'].lower()
pool_key_constructor = self.key_fn_by_scheme[scheme]
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key)
def connection_from_pool_key(self, pool_key):
"""
Get a :class:`ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(pool_key.scheme, pool_key.host, pool_key.port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
headers = kw.get('headers', self.headers)
kw['headers'] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
|
apache-2.0
|
GinnyN/towerofdimensions-django
|
build/lib/django/contrib/localflavor/in_/forms.py
|
87
|
3908
|
"""
India-specific Form helpers.
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.in_.in_states import STATES_NORMALIZED, STATE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, CharField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r"""
(
(?P<std_code> # the std-code group
^0 # all std-codes start with 0
(
(?P<twodigit>\d{2}) | # either two, three or four digits
(?P<threedigit>\d{3}) | # following the 0
(?P<fourdigit>\d{4})
)
)
[-\s] # space or -
(?P<phone_no> # the phone number group
[1-6] # first digit of phone number
(
(?(twodigit)\d{7}) | # 7 more phone digits for 3 digit stdcode
(?(threedigit)\d{6}) | # 6 more phone digits for 4 digit stdcode
(?(fourdigit)\d{5}) # 5 more phone digits for 5 digit stdcode
)
)
)$""", re.VERBOSE)
class INZipCodeField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a zip code in the format XXXXXX or XXX XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(INZipCodeField, self).__init__(r'^\d{3}\s?\d{3}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
super(INZipCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# Convert to "NNNNNN" if "NNN NNN" given
value = re.sub(r'^(\d{3})\s(\d{3})$', r'\1\2', value)
return value
class INStateField(Field):
"""
    A form field that validates its input is an Indian state name or
    abbreviation. It normalizes the input to the standard two-letter vehicle
    registration abbreviation for the given state or union territory.
"""
default_error_messages = {
'invalid': _(u'Enter an Indian state or territory.'),
}
def clean(self, value):
super(INStateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return smart_unicode(STATES_NORMALIZED[value.strip().lower()])
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class INStateSelect(Select):
"""
A Select widget that uses a list of Indian states/territories as its
choices.
"""
def __init__(self, attrs=None):
super(INStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class INPhoneNumberField(CharField):
"""
INPhoneNumberField validates that the data is a valid Indian phone number,
    including the STD code. The number must be in 0XXX-XXXXXXX or 0XXX XXXXXXX
    format. The first string is the STD code which is a '0' followed by 2-4
digits. The second string is 8 digits if the STD code is 3 digits, 7
digits if the STD code is 4 digits and 6 digits if the STD code is 5
digits. The second string will start with numbers between 1 and 6. The
separator is either a space or a hyphen.
"""
default_error_messages = {
'invalid': _('Phone numbers must be in 02X-8X or 03X-7X or 04X-6X format.'),
}
def clean(self, value):
super(INPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = smart_unicode(value)
m = phone_digits_re.match(value)
if m:
return u'%s' % (value)
raise ValidationError(self.error_messages['invalid'])
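# Hedged usage sketch (not part of the original module): with a configured
# Django settings module, the fields above behave roughly as in the
# doctest-style comments below; the values are illustrative.
#
#   >>> INZipCodeField().clean(u'110 001')           # "NNN NNN" is collapsed to "NNNNNN"
#   u'110001'
#   >>> INPhoneNumberField().clean(u'0512-4155791')  # 4-digit STD code + 7-digit number
#   u'0512-4155791'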
|
bsd-3-clause
|