repo_name stringlengths 6–100 | path stringlengths 4–294 | copies stringlengths 1–5 | size stringlengths 4–6 | content stringlengths 606–896k | license stringclasses 15 values |
---|---|---|---|---|---|
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/distutils/tests/test_unixccompiler.py | 76 | 3715 |
"""Tests for distutils.unixccompiler."""
import sys
import unittest
from test.test_support import run_unittest
from distutils import sysconfig
from distutils.unixccompiler import UnixCCompiler
class UnixCCompilerTestCase(unittest.TestCase):
def setUp(self):
self._backup_platform = sys.platform
self._backup_get_config_var = sysconfig.get_config_var
class CompilerWrapper(UnixCCompiler):
def rpath_foo(self):
return self.runtime_library_dir_option('/foo')
self.cc = CompilerWrapper()
def tearDown(self):
sys.platform = self._backup_platform
sysconfig.get_config_var = self._backup_get_config_var
def test_runtime_libdir_option(self):
# not tested under windows
if sys.platform == 'win32':
return
# Issue#5900
#
# Ensure RUNPATH is added to extension modules with RPATH if
# GNU ld is used
# darwin
sys.platform = 'darwin'
self.assertEqual(self.cc.rpath_foo(), '-L/foo')
# hp-ux
sys.platform = 'hp-ux'
old_gcv = sysconfig.get_config_var
def gcv(v):
return 'xxx'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['+s', '-L/foo'])
def gcv(v):
return 'gcc'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
def gcv(v):
return 'g++'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
sysconfig.get_config_var = old_gcv
# irix646
sys.platform = 'irix646'
self.assertEqual(self.cc.rpath_foo(), ['-rpath', '/foo'])
# osf1V5
sys.platform = 'osf1V5'
self.assertEqual(self.cc.rpath_foo(), ['-rpath', '/foo'])
# GCC GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'gcc'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# GCC non-GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'gcc'
elif v == 'GNULD':
return 'no'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# GCC GNULD with fully qualified configuration prefix
# see #7617
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'x86_64-pc-linux-gnu-gcc-4.4.2'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# non-GCC GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'cc'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
# non-GCC non-GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'cc'
elif v == 'GNULD':
return 'no'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
# AIX C/C++ linker
sys.platform = 'aix'
def gcv(v):
return 'xxx'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
def test_suite():
return unittest.makeSuite(UnixCCompilerTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 |

vinceasza/wordpress | wp-content/themes/play/node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_gyp.py | 2618 | 4756 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))


def mask_comments(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  search_re = re.compile(r'(.*?)(#)(.*)')
  return [search_re.sub(comment_replace, line) for line in input]


def quote_replace(matchobj):
  return "%s%s%s%s" % (matchobj.group(1),
                       matchobj.group(2),
                       'x'*len(matchobj.group(3)),
                       matchobj.group(2))


def mask_quotes(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  return [search_re.sub(quote_replace, line) for line in input]


def do_split(input, masked_input, search_re):
  output = []
  mask_output = []
  for (line, masked_line) in zip(input, masked_input):
    m = search_re.match(masked_line)
    while m:
      split = len(m.group(1))
      line = line[:split] + r'\n' + line[split:]
      masked_line = masked_line[:split] + r'\n' + masked_line[split:]
      m = search_re.match(masked_line)
    output.extend(line.split(r'\n'))
    mask_output.extend(masked_line.split(r'\n'))
  return (output, mask_output)


def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)

  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

  return output


def count_braces(line):
  """keeps track of the number of braces on a given line and returns the result.

  It starts at zero and subtracts for closed braces, and adds for open braces.
  """
  open_braces = ['[', '(', '{']
  close_braces = [']', ')', '}']
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  cnt = 0
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  for char in stripline:
    for brace in open_braces:
      if char == brace:
        cnt += 1
    for brace in close_braces:
      if char == brace:
        cnt -= 1

  after = False
  if cnt > 0:
    after = True

  # This catches the special case of a closing brace having something
  # other than just whitespace ahead of it -- we don't want to
  # unindent that until after this line is printed so it stays with
  # the previous indentation level.
  if cnt < 0 and closing_prefix_re.match(stripline):
    after = True
  return (cnt, after)


def prettyprint_input(lines):
  """Does the main work of indenting the input based on the brace counts."""
  indent = 0
  basic_offset = 2
  last_line = ""
  for line in lines:
    if COMMENT_RE.match(line):
      print line
    else:
      line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
      if len(line) > 0:
        (brace_diff, after) = count_braces(line)
        if brace_diff != 0:
          if after:
            print " " * (basic_offset * indent) + line
            indent += brace_diff
          else:
            indent += brace_diff
            print " " * (basic_offset * indent) + line
        else:
          print " " * (basic_offset * indent) + line
      else:
        print ""
      last_line = line


def main():
  if len(sys.argv) > 1:
    data = open(sys.argv[1]).read().splitlines()
  else:
    data = sys.stdin.read().splitlines()

  # Split up the double braces.
  lines = split_double_braces(data)

  # Indent and print the output.
  prettyprint_input(lines)

  return 0


if __name__ == '__main__':
  sys.exit(main())
| gpl-2.0 |

liweitianux/atoolbox | astro/radec2deg.py | 1 | 1957 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Aaron LI
# Created: 2015-04-17
# Updated: 2016-06-30
#
"""
Convert the coordinates data in format (??h??m??s, ??d??m??s)
to format (degree, degree).
"""
import os
import sys
import re
import getopt
import math
USAGE = """Usage:
%(prog)s [ -h ] -i coords_file
Required arguments:
-i, --infile
infile containing the coordinates
Optional arguments:
-h, --help
""" % {'prog': os.path.basename(sys.argv[0])}
def usage():
print(USAGE)
def ra2deg(h, m, s):
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def dec2deg(d, m, s):
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def s_ra2deg(hms):
h, m, s = map(float, re.sub('[hms]', ' ', hms).split())
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def s_dec2deg(dms):
d, m, s = map(float, re.sub('[dms]', ' ', dms).split())
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def calc_offset(coord1, coord2):
ra1, dec1 = coord1
ra2, dec2 = coord2
return math.sqrt((ra1-ra2)**2 + (dec1-dec2)**2)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:",
["help", "infile="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(1)
elif opt in ("-i", "--infile"):
infile = arg
else:
assert False, "unhandled option"
for line in open(infile):
if re.match(r"^\s*#", line) or re.match(r"^\s*$", line):
continue
ra, dec = line.split()
ra_deg = s_ra2deg(ra)
dec_deg = s_dec2deg(dec)
print("%.8f %.8f" % (ra_deg, dec_deg))
if __name__ == "__main__":
main()
| mit |

willharris/django | tests/utils_tests/test_numberformat.py | 18 | 3517 |
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):

    def test_format_number(self):
        self.assertEqual(nformat(1234, '.'), '1234')
        self.assertEqual(nformat(1234.2, '.'), '1234.2')
        self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')

    def test_format_string(self):
        self.assertEqual(nformat('1234', '.'), '1234')
        self.assertEqual(nformat('1234.2', '.'), '1234.2')
        self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
        self.assertEqual(nformat('10000', '.', grouping=3,
                                 thousand_sep='comma', force_grouping=True),
                         '10comma000')

    def test_large_number(self):
        most_max = ('{}179769313486231570814527423731704356798070567525844996'
                    '598917476803157260780028538760589558632766878171540458953'
                    '514382464234321326889464182768467546703537516986049910576'
                    '551282076245490090389328944075868508455133942304583236903'
                    '222948165808559332123348274797826204144723168738177180919'
                    '29988125040402618412485836{}')
        most_max2 = ('{}35953862697246314162905484746340871359614113505168999'
                     '31978349536063145215600570775211791172655337563430809179'
                     '07028764928468642653778928365536935093407075033972099821'
                     '15310256415249098018077865788815173701691026788460916647'
                     '38064458963316171186642466965495956524082894463374763543'
                     '61838599762500808052368249716736')
        int_max = int(float_info.max)
        self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
        self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
        self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
        self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
        self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
        self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))

    def test_decimal_numbers(self):
        self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
        self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
        self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
        self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')
| bsd-3-clause |

lshain-android-source/external-chromium_org | chrome/tools/build/win/syzygy_instrument.py | 31 | 6519 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script to help building Syzygy-instrumented Chrome binaries."""
import glob
import logging
import optparse
import os
import shutil
import subprocess
import sys
# The default directory containing the Syzygy toolchain.
_DEFAULT_SYZYGY_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '../../../..',
    'third_party/syzygy/binaries/exe/'))

# Basenames of various tools.
_INSTRUMENT_EXE = 'instrument.exe'
_GENFILTER_EXE = 'genfilter.exe'
_ASAN_AGENT_DLL = 'asan_rtl.dll'

# Default agents for known modes.
_DEFAULT_AGENT_DLLS = { 'asan': _ASAN_AGENT_DLL }

_LOGGER = logging.getLogger()


def _Shell(*cmd, **kw):
  """Shells out to "cmd". Returns a tuple of cmd's stdout, stderr."""
  _LOGGER.info('Running command "%s".', cmd)
  prog = subprocess.Popen(cmd, **kw)

  stdout, stderr = prog.communicate()
  if prog.returncode != 0:
    raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))

  return stdout, stderr


def _CompileFilter(syzygy_dir, executable, symbol, dst_dir, filter_file):
  """Compiles the provided filter writing the compiled filter file to
  dst_dir. Returns the absolute path of the compiled filter.
  """
  output_filter_file = os.path.abspath(os.path.join(
      dst_dir, os.path.basename(filter_file) + '.json'))
  cmd = [os.path.abspath(os.path.join(syzygy_dir, _GENFILTER_EXE)),
         '--action=compile',
         '--input-image=%s' % executable,
         '--input-pdb=%s' % symbol,
         '--output-file=%s' % output_filter_file,
         '--overwrite',
         os.path.abspath(filter_file)]

  _Shell(*cmd)
  if not os.path.exists(output_filter_file):
    raise RuntimeError('Compiled filter file missing: %s' % output_filter_file)
  return output_filter_file


def _InstrumentBinary(syzygy_dir, mode, executable, symbol, dst_dir,
                      filter_file):
  """Instruments the executable found in input_dir, and writes the resultant
  instrumented executable and symbol files to dst_dir.
  """
  cmd = [os.path.abspath(os.path.join(syzygy_dir, _INSTRUMENT_EXE)),
         '--overwrite',
         '--mode=%s' % mode,
         '--debug-friendly',
         '--input-image=%s' % executable,
         '--input-pdb=%s' % symbol,
         '--output-image=%s' % os.path.abspath(
             os.path.join(dst_dir, os.path.basename(executable))),
         '--output-pdb=%s' % os.path.abspath(
             os.path.join(dst_dir, os.path.basename(symbol)))]

  # If a filter was specified then pass it on to the instrumenter.
  if filter_file:
    cmd.append('--filter=%s' % os.path.abspath(filter_file))

  return _Shell(*cmd)


def _CopyAgentDLL(agent_dll, destination_dir):
  """Copy the agent DLL and PDB to the destination directory."""
  dirname, agent_name = os.path.split(agent_dll);
  agent_dst_name = os.path.join(destination_dir, agent_name);
  shutil.copyfile(agent_dll, agent_dst_name)

  # Search for the corresponding PDB file. We use this approach because
  # the naming convention for PDBs has changed recently (from 'foo.pdb'
  # to 'foo.dll.pdb') and we want to support both conventions during the
  # transition.
  agent_pdbs = glob.glob(os.path.splitext(agent_dll)[0] + '*.pdb')
  if len(agent_pdbs) != 1:
    raise RuntimeError('Failed to locate PDB file for %s' % agent_name)
  agent_pdb = agent_pdbs[0]
  agent_dst_pdb = os.path.join(destination_dir, os.path.split(agent_pdb)[1])
  shutil.copyfile(agent_pdb, agent_dst_pdb)


def main(options):
  # Make sure the destination directory exists.
  if not os.path.isdir(options.destination_dir):
    _LOGGER.info('Creating destination directory "%s".',
                 options.destination_dir)
    os.makedirs(options.destination_dir)

  # Compile the filter if one was provided.
  filter_file = None
  if options.filter:
    filter_file = _CompileFilter(options.syzygy_dir,
                                 options.input_executable,
                                 options.input_symbol,
                                 options.destination_dir,
                                 options.filter)

  # Instruments the binaries into the destination directory.
  _InstrumentBinary(options.syzygy_dir,
                    options.mode,
                    options.input_executable,
                    options.input_symbol,
                    options.destination_dir,
                    filter_file)

  # Copy the agent DLL and PDB to the destination directory.
  _CopyAgentDLL(options.agent_dll, options.destination_dir);


def _ParseOptions():
  option_parser = optparse.OptionParser()
  option_parser.add_option('--input_executable',
      help='The path to the input executable.')
  option_parser.add_option('--input_symbol',
      help='The path to the input symbol file.')
  option_parser.add_option('--mode',
      help='Specifies which instrumentation mode is to be used.')
  option_parser.add_option('--agent_dll',
      help='The agent DLL used by this instrumentation. If not specified a '
           'default will be searched for.')
  option_parser.add_option('--syzygy-dir', default=_DEFAULT_SYZYGY_DIR,
      help='Instrumenter executable to use, defaults to "%default".')
  option_parser.add_option('-d', '--destination_dir',
      help='Destination directory for instrumented files.')
  option_parser.add_option('--filter',
      help='An optional filter. This will be compiled and passed to the '
           'instrumentation executable.')
  options, args = option_parser.parse_args()

  if not options.mode:
    option_parser.error('You must provide an instrumentation mode.')
  if not options.input_executable:
    option_parser.error('You must provide an input executable.')
  if not options.input_symbol:
    option_parser.error('You must provide an input symbol file.')
  if not options.destination_dir:
    option_parser.error('You must provide a destination directory.')

  if not options.agent_dll:
    if not options.mode in _DEFAULT_AGENT_DLLS:
      option_parser.error('No known default agent DLL for mode "%s".' %
          options.mode)
    options.agent_dll = os.path.abspath(os.path.join(options.syzygy_dir,
        _DEFAULT_AGENT_DLLS[options.mode]))
    _LOGGER.info('Using default agent DLL: %s' % options.agent_dll)

  return options


if '__main__' == __name__:
  logging.basicConfig(level=logging.INFO)
  sys.exit(main(_ParseOptions()))
| bsd-3-clause |

maestrano/openerp | openerp/addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py | 56 | 6767 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
class employees_yearly_salary_report(report_sxw.rml_parse):

    def __init__(self, cr, uid, name, context):
        super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_employee_detail': self.get_employee_detail,
            'cal_monthly_amt': self.cal_monthly_amt,
            'get_periods': self.get_periods,
            'get_total': self.get_total,
            'get_allow': self.get_allow,
            'get_deduct': self.get_deduct,
        })
        self.context = context

    def get_periods(self, form):
        self.mnths = []
        # Get start year-month-date and end year-month-date
        first_year = int(form['date_from'][0:4])
        last_year = int(form['date_to'][0:4])
        first_month = int(form['date_from'][5:7])
        last_month = int(form['date_to'][5:7])
        no_months = (last_year-first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year

        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            if current_month == 12:
                current_month = 0
                current_year = last_year
            current_month = current_month + 1
        for c in range(0, (12-no_months)):
            mnth_name.append('None')
            self.mnths.append('None')
        return [mnth_name]

    def get_employee(self, form):
        return self.pool.get('hr.employee').browse(self.cr, self.uid, form.get('employee_ids', []), context=self.context)

    def get_employee_detail(self, form, obj):
        self.allow_list = []
        self.deduct_list = []
        self.total = 0.00
        gross = False
        net = False
        payslip_lines = self.cal_monthly_amt(form, obj.id)
        for line in payslip_lines:
            for line[0] in line:
                if line[0][0] == "Gross":
                    gross = line[0]
                elif line[0][0] == "Net":
                    net = line[0]
                elif line[0][13] > 0.0 and line[0][0] != "Net":
                    self.total += line[0][len(line[0])-1]
                    self.allow_list.append(line[0])
                elif line[0][13] < 0.0:
                    self.total += line[0][len(line[0])-1]
                    self.deduct_list.append(line[0])
        if gross:
            self.allow_list.append(gross)
        if net:
            self.deduct_list.append(net)
        return None

    def cal_monthly_amt(self, form, emp_id):
        category_obj = self.pool.get('hr.salary.rule.category')
        result = []
        res = []
        salaries = {}
        self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
            to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
            LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
            LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
            LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
            WHERE p.employee_id = %s \
            GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
            ORDER BY pl.sequence, rc.parent_id''', (emp_id,))
        salary = self.cr.fetchall()
        for category in salary:
            if category[0] not in salaries:
                salaries.setdefault(category[0], {})
                salaries[category[0]].update({category[1]: {category[3]: category[2]}})
            elif category[1] not in salaries[category[0]]:
                salaries[category[0]].setdefault(category[1], {})
                salaries[category[0]][category[1]].update({category[3]: category[2]})
            else:
                salaries[category[0]][category[1]].update({category[3]: category[2]})
        category_ids = category_obj.search(self.cr, self.uid, [], context=self.context)
        categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
        for code in map(lambda x: x['code'], categories):
            if code in salaries:
                res = self.salary_list(salaries[code])
                result.append(res)
        return result

    def salary_list(self, salaries):
        cat_salary_all = []
        for category_name, amount in salaries.items():
            cat_salary = []
            total = 0.0
            cat_salary.append(category_name)
            for mnth in self.mnths:
                if mnth <> 'None':
                    if len(mnth) != 7:
                        mnth = '0' + str(mnth)
                    if mnth in amount and amount[mnth]:
                        cat_salary.append(amount[mnth])
                        total += amount[mnth]
                    else:
                        cat_salary.append(0.00)
                else:
                    cat_salary.append('')
            cat_salary.append(total)
            cat_salary_all.append(cat_salary)
        return cat_salary_all

    def get_allow(self):
        return self.allow_list

    def get_deduct(self):
        return self.deduct_list

    def get_total(self):
        return self.total

report_sxw.report_sxw('report.salary.detail.byyear', 'yearly.salary.detail', 'hr_payroll/report/report_hr_yearly_salary_detail.rml', parser=employees_yearly_salary_report, header='internal landscape')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |

TeamTwisted/hells-Core-N5 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 |
# Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |

syndicate-storage/syndicate-core | tests/gateways/syndicate-write-trunc-read-repl.py | 2 | 5378 |
#!/usr/bin/env python
"""
Copyright 2016 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import random
import time
import testlib
import testconf
import shutil
REPL_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-repl")
MKDIR_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-mkdir")
RG_PATH = os.path.join(testconf.SYNDICATE_RG_ROOT, "syndicate-rg")
RG_DRIVER = os.path.join(testconf.SYNDICATE_PYTHON_ROOT, "syndicate/rg/drivers/disk" )
NUM_FILES = 2
def stop_and_save( output_dir, proc, out_path, save_name ):
exitcode, out = testlib.stop_gateway( proc, out_path )
testlib.save_output( output_dir, save_name, out )
return exitcode, out
if __name__ == "__main__":
local_path = testlib.make_random_file(16384)
local_fd = open(local_path, "r")
write_data = local_fd.read()
local_fd.close()
config_dir, output_dir = testlib.test_setup()
volume_name = testlib.add_test_volume( config_dir, blocksize=1024 )
RG_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "RG", caps="NONE", email=testconf.SYNDICATE_ADMIN )
testlib.update_gateway( config_dir, RG_gateway_name, "port=31112", "driver=%s" % RG_DRIVER )
rg_proc, rg_out_path = testlib.start_gateway( config_dir, RG_PATH, testconf.SYNDICATE_ADMIN, volume_name, RG_gateway_name, valgrind=True )
if not testlib.gateway_ping(31112, 15):
raise Exception("%s exited %s" % (RG_PATH, rg_proc.poll()))
# should cause the RG to get updated that there's a new gateway
gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
cat_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
# look up reader gateway
cat_gateway_info = testlib.read_gateway( config_dir, cat_gateway_name )
cat_gateway_id = cat_gateway_info['g_id']
# look up RG
rg_gateway_info = testlib.read_gateway( config_dir, RG_gateway_name )
rg_gateway_id = rg_gateway_info['g_id']
volume_info = testlib.read_volume( config_dir, volume_name )
volume_id = volume_info['volume_id']
random_part = hex(random.randint(0, 2**32-1))[2:]
output_paths = []
# make target directory
exitcode, out = testlib.run( MKDIR_PATH, '-d2', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'), '-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name, "/newdir", valgrind=True )
testlib.save_output( output_dir, "syndicate-mkdir", out )
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg")
raise Exception("%s exited %s" % (PUT_PATH, exitcode))
# generate the commands to feed into the REPL
repl_cmd = ""
for i in xrange(0, NUM_FILES):
output_path = "/put-%s-%s" % (random_part, i)
output_paths.append(output_path)
repl_cmd += "create %s 0644\n" % output_path
repl_cmd += "write 0 0 %s %s\n" % (len(write_data), write_data)
repl_cmd += "close 0\n"
# truncate while closed
# make smaller
repl_cmd += "trunc %s %s\n" % (output_path, len(write_data)/2)
# read it back
repl_cmd += "open %s 2\n" % output_path
repl_cmd += "read 0 0 %s\n" % len(write_data)
repl_cmd += "close 0\n"
print "\n".join( ["< %s" % l[:min(80, len(l))] + "..."[:min(max(0, len(l)-80), 3)] for l in repl_cmd.split("\n")] )
# open the syndicate REPL
ug_proc, ug_out_path = testlib.start_gateway( config_dir, REPL_PATH, testconf.SYNDICATE_ADMIN, volume_name, gateway_name, valgrind=True, stdin=True)
if not testlib.gateway_ping( 31111, 15 ):
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg")
raise Exception("%s exited %s" % (REPL_PATH, repl_proc.poll()))
out, exit_rc = testlib.finish( ug_proc, stdin=repl_cmd, out_path=ug_out_path, valgrind=True )
testlib.save_output( output_dir, "syndicate-repl", out )
if exit_rc != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg")
raise Exception("%s exited %s" % (REPL_PATH, exit_rc))
rg_exitcode, rg_out = stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg")
if rg_exitcode != 0:
raise Exception("%s exited %s" % (RG_PATH, rg_exitcode))
# search for the expected data
# should show up in the read commands
# expect only the first half
off = 0
expected_data = write_data[:(len(write_data)/2)]
for i in xrange(0, NUM_FILES):
off = out[off:].find(expected_data)
if off < 0:
# not found!
print expected_data
raise Exception("Missing count %s of expected data" % i)
off += len(expected_data)
sys.exit(0)
| apache-2.0 |

jfantom/incubator-airflow | airflow/contrib/auth/backends/proxied_auth.py | 1 | 3179 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from sys import version_info
import flask_login
from flask_login import login_required, current_user, logout_user
from airflow import settings
from airflow import models
from airflow.utils.log.logging_mixin import LoggingMixin
import os
log = LoggingMixin().log
class AuthenticationError(Exception):
    pass


class ProxiedUser(models.User):
    def __init__(self, user):
        self.user = user

    def is_active(self):
        '''Required by flask_login'''
        return True

    def is_authenticated(self):
        '''Required by flask_login'''
        return True

    def is_anonymous(self):
        '''Required by flask_login'''
        return False

    def get_id(self):
        '''Returns the current user id as required by flask_login'''
        return self.user.get_id()

    def data_profiling(self):
        '''Provides access to data profiling tools'''
        return True

    def is_superuser(self):
        '''Access all the things'''
        return True


class ProxiedAuth(object):

    def __init__(self):
        self.login_manager = flask_login.LoginManager()

    def init_app(self, flask_app):
        self.flask_app = flask_app

        self.login_manager.init_app(self.flask_app)

        # checks headers instead of cookies
        self.login_manager.request_loader(self.load_request)

        # this is needed to disable the anti forgery check
        flask_app.config['WTF_CSRF_CHECK_DEFAULT'] = False

    def load_request(self, request):
        '''
        Reads the header field that has already been verified on the
        nginx side by google auth. Header field is specified by setting
        the environment variable AIRFLOW_PROXIED_AUTH_HEADER or else
        it's defaulted to X-Email.
        '''
        session = settings.Session()

        header_field = os.getenv('AIRFLOW_PROXIED_AUTH_HEADER', 'X-Email')
        user_email = request.headers.get(header_field)

        # this shouldn't happen since nginx should take care of it!
        if user_email is None:
            raise AuthenticationError(
                'Airflow failed to get fields from request header')

        # insert user into database if doesn't exist
        user = session.query(models.User).filter(
            models.User.username == user_email).first()

        if not user:
            user = models.User(
                username=user_email,
                is_superuser=True)

        session.merge(user)
        session.commit()
        session.close()

        return ProxiedUser(user)

login_manager = ProxiedAuth()
| apache-2.0 |

iut-ibk/DynaMind-ToolBox | DynaMind-ComplexUnitTests/src/gtest-1.6.0/scripts/fuse_gtest_files.py | 2577 | 8813 |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 |

AlbertoPeon/invenio | modules/bibrank/lib/bibrank_citation_grapher.py | 10 | 6030 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import os
import time
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG, CFG_WEBDIR, CFG_BIBRANK_SHOW_CITATION_GRAPHS
from invenio.dbquery import run_sql
from invenio.messages import gettext_set_language
from invenio.bibrank_grapher import create_temporary_image, write_coordinates_in_tmp_file, remove_old_img
from invenio.bibrank_citation_searcher import calculate_cited_by_list
cfg_bibrank_print_citation_history = 1
color_line_list = ['9', '19', '10', '15', '21', '18']
cfg_bibrank_citation_history_min_x_points = 3 # do not generate graphs that have less than three points
def get_field_values(recID, tag):
"""Return list of field values for field tag inside record RECID."""
out = []
if tag == "001___":
out.append(str(recID))
else:
digit = tag[0:2]
bx = "bib%sx" % digit
bibx = "bibrec_bib%sx" % digit
query = "SELECT bx.value FROM %s AS bx, %s AS bibx WHERE bibx.id_bibrec='%s' AND bx.id=bibx.id_bibxxx AND bx.tag LIKE '%s'" "ORDER BY bibx.field_number, bx.tag ASC" % (bx, bibx, recID, tag)
res = run_sql(query)
for row in res:
out.append(row[0])
return out
def calculate_citation_history_coordinates(recid):
"""Return a list of citation graph coordinates for RECID, sorted by year."""
result = []
dbg = ""
initial_result= get_initial_result(calculate_citation_graphe_x_coordinates(recid))
citlist = calculate_cited_by_list(recid)
for rec_id, cit_weight in citlist:
cit_year = get_field_values(rec_id,'773__y')
if not cit_year:
cit_year = get_field_values(rec_id, '260__c')
if not cit_year:
cit_year = get_field_values(rec_id, '269__c')
#some records simply do not have these fields
if cit_year:
#maybe cit_year[0][0:4] has a typo and cannot
#be converted to an int
numeric=1
try:
tmpval = int(cit_year[0][0:4])
except ValueError:
numeric=0
if numeric and initial_result.has_key(int(cit_year[0][0:4])):
initial_result[int(cit_year[0][0:4])] += 1
for key, value in initial_result.items():
result.append((key, value))
result.sort()
if len(result) < cfg_bibrank_citation_history_min_x_points:
# do not generate graphs that have less than X points
return []
return result
def calculate_citation_graphe_x_coordinates(recid):
"""Return a range of year from the publication year of record RECID
until the current year."""
rec_years = []
recordyear = get_field_values(recid, '773__y')
if not recordyear:
recordyear = get_field_values(recid, '260__c')
if not recordyear:
recordyear = get_field_values(recid, '269__c')
currentyear = time.localtime()[0]
if recordyear == []:
recordyear = currentyear
else:
recordyear = find_year(recordyear[0])
interval = range(int(recordyear), currentyear+1)
return interval
def find_year(recordyear):
"""find the year in the string as a suite of 4 int"""
s = ""
for i in range(len(recordyear)-3):
s = recordyear[i:i+4]
        if s.isdigit():
break
return s
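# Illustrative behaviour (hypothetical input, added for clarity): find_year("2005-06-17")
# returns "2005"; if no four-digit run is found, the last slice examined is returned
# and the caller's int() conversion may raise ValueError.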
def get_initial_result(rec_years):
"""return an initial dictionary with year of record publication as key
and zero as value
"""
result = {}
for year in rec_years :
result[year] = 0
return result
def html_command(file):
t = ''
if CFG_BIBRANK_SHOW_CITATION_GRAPHS == 1:
t = """<img src='%s/img/%s' align="center" alt="">""" % (CFG_SITE_URL, file)
elif CFG_BIBRANK_SHOW_CITATION_GRAPHS == 2:
t = open(CFG_WEBDIR + "/img/" + file).read()
#t += "</table></td></tr></table>"
return t
def create_citation_history_graph_and_box(recid, ln=CFG_SITE_LANG):
"""Create graph with citation history for record RECID (into a
    temporary file) and return HTML box referring to that image.
Called by Detailed record pages.
"""
_ = gettext_set_language(ln)
html_result = ""
if cfg_bibrank_print_citation_history:
coordinates = calculate_citation_history_coordinates(recid)
if coordinates:
html_head = """<br /><table><tr><td class="blocknote">%s</td></tr></table>"""% _("Citation history:")
graphe_file_name = 'citation_%s_stats.png' % str(recid)
remove_old_img(graphe_file_name)
years = calculate_citation_graphe_x_coordinates(recid)
years.sort()
datas_info = write_coordinates_in_tmp_file([coordinates])
graphe = create_temporary_image(recid, 'citation', datas_info[0], 'Year', 'Times cited', [0,0], datas_info[1], [], ' ', years)
graphe_image = graphe[0]
graphe_source_file = graphe[1]
if graphe_image and graphe_source_file:
if os.path.exists(graphe_source_file):
os.unlink(datas_info[0])
html_graphe_code = """<p>%s"""% html_command(graphe_image)
html_result = html_head + html_graphe_code
return html_result
|
gpl-2.0
|
adw0rd/lettuce
|
tests/integration/lib/Django-1.2.5/django/contrib/comments/signals.py
|
425
|
1079
|
"""
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated); this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False, the comment will be
# discarded and a 403 (not allowed) response returned. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
|
gpl-3.0
|
neerja28/Tempest
|
tempest/common/generator/base_generator.py
|
28
|
6282
|
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import jsonschema
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
def _check_for_expected_result(name, schema):
expected_result = None
if "results" in schema:
if name in schema["results"]:
expected_result = schema["results"][name]
return expected_result
def generator_type(*args, **kwargs):
def wrapper(func):
func.types = args
for key in kwargs:
setattr(func, key, kwargs[key])
return func
return wrapper
def simple_generator(fn):
"""
Decorator for simple generators that return one value
"""
@functools.wraps(fn)
def wrapped(self, schema):
result = fn(self, schema)
if result is not None:
expected_result = _check_for_expected_result(fn.__name__, schema)
return (fn.__name__, result, expected_result)
return
return wrapped
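# Illustrative usage in a concrete generator set (hypothetical method, not part of
# this module): the two decorators are typically stacked, e.g.
#
#     @generator_type("string")
#     @simple_generator
#     def gen_int_as_string(self, schema):
#         return 4
#
# generator_type() registers the method under the "string" schema type, and
# simple_generator() turns its raw return value into the
# (name, value, expected_result) tuple that generate_payload() below unpacks.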
class BasicGeneratorSet(object):
_instance = None
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"http-method": {
"enum": ["GET", "PUT", "HEAD",
"POST", "PATCH", "DELETE", 'COPY']
},
"admin_client": {"type": "boolean"},
"url": {"type": "string"},
"default_result_code": {"type": "integer"},
"json-schema": {},
"resources": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"name": {"type": "string"},
"expected_result": {"type": "integer"}
}
}
]
}
},
"results": {
"type": "object",
"properties": {}
}
},
"required": ["name", "http-method", "url"],
"additionalProperties": False,
}
def __init__(self):
self.types_dict = {}
for m in dir(self):
            if callable(getattr(self, m)) and '__' not in m:
method = getattr(self, m)
if hasattr(method, "types"):
for type in method.types:
if type not in self.types_dict:
self.types_dict[type] = []
self.types_dict[type].append(method)
def validate_schema(self, schema):
if "json-schema" in schema:
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
def generate_scenarios(self, schema, path=None):
"""
        Generates the scenarios (all possible test cases) out of the given
        schema.
:param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
:param path: the schema path if the given schema is a subschema
"""
schema_type = schema['type']
scenarios = []
if schema_type == 'object':
properties = schema["properties"]
for attribute, definition in six.iteritems(properties):
current_path = copy.copy(path)
if path is not None:
current_path.append(attribute)
else:
current_path = [attribute]
scenarios.extend(
self.generate_scenarios(definition, current_path))
elif isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
for generator in self.types_dict[schema_type]:
if hasattr(generator, "needed_property"):
prop = generator.needed_property
if (prop not in schema or
schema[prop] is None or
schema[prop] is False):
continue
name = generator.__name__
if ("exclude_tests" in schema and
name in schema["exclude_tests"]):
continue
if path is not None:
name = "%s_%s" % ("_".join(path), name)
scenarios.append({
"_negtest_name": name,
"_negtest_generator": generator,
"_negtest_schema": schema,
"_negtest_path": path})
return scenarios
def generate_payload(self, test, schema):
"""
        Generates one jsonschema out of the given test. It is mandatory to call
        generate_scenarios beforehand to register all needed variables on the test.
:param test: A test object (scenario) with all _negtest variables on it
:param schema: schema for the test
"""
generator = test._negtest_generator
ret = generator(test._negtest_schema)
path = copy.copy(test._negtest_path)
expected_result = None
if ret is not None:
generator_result = generator(test._negtest_schema)
invalid_snippet = generator_result[1]
expected_result = generator_result[2]
element = path.pop()
if len(path) > 0:
schema_snip = reduce(dict.get, path, schema)
schema_snip[element] = invalid_snippet
else:
schema[element] = invalid_snippet
return expected_result
|
apache-2.0
|
CiscoSystems/vespa
|
neutron/plugins/mlnx/mlnx_plugin.py
|
4
|
22576
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.mlnx import agent_notify_api
from neutron.plugins.mlnx.common import constants
from neutron.plugins.mlnx.db import mlnx_db_v2 as db
from neutron.plugins.mlnx import rpc_callbacks
LOG = logging.getLogger(__name__)
class MellanoxEswitchPlugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin):
"""Realization of Neutron API on Mellanox HCA embedded switch technology.
Current plugin provides embedded HCA Switch connectivity.
Code is based on the Linux Bridge plugin content to
support consistency with L3 & DHCP Agents.
A new VLAN is created for each network. An agent is relied upon
to perform the actual HCA configuration on each host.
The provider extension is also supported.
    The port binding extension enables an external application to relay
    information to and from the plugin.
"""
    # This attribute specifies whether or not the plugin supports
    # bulk operations. Name mangling is used in order to ensure it
    # is qualified by class.
__native_bulk_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
"""Start Mellanox Neutron Plugin."""
db.initialize()
self._parse_network_vlan_ranges()
db.sync_network_states(self.network_vlan_ranges)
self._set_tenant_network_type()
self.vnic_type = cfg.CONF.ESWITCH.vnic_type
self.base_binding_dict = {
portbindings.VIF_TYPE: self.vnic_type,
portbindings.CAPABILITIES: {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
self._setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
LOG.debug(_("Mellanox Embedded Switch Plugin initialisation complete"))
def _setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.conn = rpc.create_connection(new=True)
self.callbacks = rpc_callbacks.MlnxRpcCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.notifier = agent_notify_api.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotify
)
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.MLNX.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Server terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max):
self._add_network(physical_network)
self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max))
def _add_network(self, physical_network):
if physical_network not in self.network_vlan_ranges:
self.network_vlan_ranges[physical_network] = []
def _extend_network_dict_provider(self, context, network):
binding = db.get_network_binding(context.session, network['id'])
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type == svc_constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == svc_constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
else:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
def _set_tenant_network_type(self):
self.tenant_network_type = cfg.CONF.MLNX.tenant_network_type
if self.tenant_network_type not in [svc_constants.TYPE_VLAN,
constants.TYPE_IB,
svc_constants.TYPE_LOCAL,
svc_constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Service terminated!"),
self.tenant_network_type)
sys.exit(1)
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_FLAT:
self._process_flat_net(segmentation_id_set)
segmentation_id = constants.FLAT_VLAN_ID
elif network_type in [svc_constants.TYPE_VLAN, constants.TYPE_IB]:
self._process_vlan_net(segmentation_id, segmentation_id_set)
elif network_type == svc_constants.TYPE_LOCAL:
self._process_local_net(physical_network_set,
segmentation_id_set)
segmentation_id = constants.LOCAL_VLAN_ID
physical_network = None
else:
msg = _("provider:network_type %s not supported") % network_type
raise q_exc.InvalidInput(error_message=msg)
physical_network = self._process_net_type(network_type,
physical_network,
physical_network_set)
return (network_type, physical_network, segmentation_id)
def _process_flat_net(self, segmentation_id_set):
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise q_exc.InvalidInput(error_message=msg)
def _process_vlan_net(self, segmentation_id, segmentation_id_set):
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise q_exc.InvalidInput(error_message=msg)
def _process_local_net(self, physical_network_set, segmentation_id_set):
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
def _process_net_type(self, network_type,
physical_network,
physical_network_set):
if network_type in [svc_constants.TYPE_VLAN,
constants.TYPE_IB,
svc_constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = _("Unknown provider:physical_network "
"%s") % physical_network
raise q_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise q_exc.InvalidInput(error_message=msg)
return physical_network
def _check_port_binding_for_net_type(self, vnic_type, net_type):
if net_type == svc_constants.TYPE_VLAN:
return vnic_type in (constants.VIF_TYPE_DIRECT,
constants.VIF_TYPE_HOSTDEV)
elif net_type == constants.TYPE_IB:
return vnic_type == constants.VIF_TYPE_HOSTDEV
return False
def _process_port_binding_create(self, context, attrs):
binding_profile = attrs.get(portbindings.PROFILE)
binding_profile_set = attributes.is_attr_set(binding_profile)
net_binding = db.get_network_binding(context.session,
attrs.get('network_id'))
net_type = net_binding.network_type
if not binding_profile_set:
return self.vnic_type
if constants.VNIC_TYPE in binding_profile:
vnic_type = binding_profile[constants.VNIC_TYPE]
if vnic_type in (constants.VIF_TYPE_DIRECT,
constants.VIF_TYPE_HOSTDEV):
if self._check_port_binding_for_net_type(vnic_type,
net_type):
self.base_binding_dict[portbindings.VIF_TYPE] = vnic_type
return vnic_type
else:
msg = (_("Unsupported vnic type %(vnic_type)s "
"for network type %(net_type)s") %
{'vnic_type': vnic_type, 'net_type': net_type})
else:
msg = _("Invalid vnic_type on port_create")
else:
msg = _("vnic_type is not defined in port profile")
raise q_exc.InvalidInput(error_message=msg)
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
session = context.session
with session.begin(subtransactions=True):
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == svc_constants.TYPE_NONE:
raise q_exc.TenantNetworksDisabled()
elif network_type in [svc_constants.TYPE_VLAN,
constants.TYPE_IB]:
physical_network, vlan_id = db.reserve_network(session)
else: # TYPE_LOCAL
vlan_id = constants.LOCAL_VLAN_ID
else:
# provider network
if network_type in [svc_constants.TYPE_VLAN,
constants.TYPE_IB,
svc_constants.TYPE_FLAT]:
db.reserve_specific_network(session,
physical_network,
vlan_id)
net = super(MellanoxEswitchPlugin, self).create_network(context,
network)
db.add_network_binding(session, net['id'],
network_type,
physical_network,
vlan_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
LOG.debug(_("Created network: %s"), net['id'])
return net
def update_network(self, context, net_id, network):
LOG.debug(_("Update network"))
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(MellanoxEswitchPlugin, self).update_network(context,
net_id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, net_id):
LOG.debug(_("Delete network"))
session = context.session
with session.begin(subtransactions=True):
binding = db.get_network_binding(session, net_id)
super(MellanoxEswitchPlugin, self).delete_network(context,
net_id)
if binding.segmentation_id != constants.LOCAL_VLAN_ID:
db.release_network(session, binding.physical_network,
binding.segmentation_id,
self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, net_id)
def get_network(self, context, net_id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(MellanoxEswitchPlugin, self).get_network(context,
net_id,
None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(MellanoxEswitchPlugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def _extend_port_dict_binding(self, context, port):
port_binding = db.get_port_profile_binding(context.session,
port['id'])
if port_binding:
port[portbindings.VIF_TYPE] = port_binding.vnic_type
binding = db.get_network_binding(context.session,
port['network_id'])
fabric = binding.physical_network
port[portbindings.PROFILE] = {'physical_network': fabric}
return port
def create_port(self, context, port):
LOG.debug(_("create_port with %s"), port)
session = context.session
port_data = port['port']
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
vnic_type = self._process_port_binding_create(context,
port['port'])
port = super(MellanoxEswitchPlugin,
self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data,
port)
db.add_port_profile_binding(context.session, port['id'], vnic_type)
self._process_port_create_security_group(
context, port, sgids)
self.notify_security_groups_member_updated(context, port)
return self._extend_port_dict_binding(context, port)
def get_port(self, context, id, fields=None):
port = super(MellanoxEswitchPlugin, self).get_port(context,
id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
res_ports = []
ports = super(MellanoxEswitchPlugin,
self).get_ports(context, filters, fields, sorts,
limit, marker, page_reverse)
for port in ports:
port = self._extend_port_dict_binding(context, port)
res_ports.append(self._fields(port, fields))
return res_ports
def update_port(self, context, port_id, port):
original_port = self.get_port(context, port_id)
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
updated_port = super(MellanoxEswitchPlugin, self).update_port(
context, port_id, port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify = self.update_security_group_on_port(
context, port_id, port, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
binding = db.get_network_binding(context.session,
updated_port['network_id'])
self.notifier.port_update(context, updated_port,
binding.physical_network,
binding.network_type,
binding.segmentation_id)
return self._extend_port_dict_binding(context, updated_port)
def delete_port(self, context, port_id, l3_port_check=True):
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, port_id)
port = self.get_port(context, port_id)
self._delete_port_security_group_bindings(context, port_id)
super(MellanoxEswitchPlugin, self).delete_port(context, port_id)
self.notify_security_groups_member_updated(context, port)
|
apache-2.0
|
dvklopfenstein/PrincetonAlgorithms
|
py/AlgsSedgewickWayne/Bag.py
|
1
|
2806
|
"""Bag class is a container for generic items."""
class Bag(object): # <Item> implements Iterable<Item>:
"""The Bag class represents a bag (or multiset) of generic items."""
class _Node(object): # private static class <Item>:
"""helper linked list class"""
def __init__(self, Item, Next):
self._item = Item
self._next = Next
def __init__(self):
self._first = None # beginning of bag
self._N = 0 # number of elements in bag
def isEmpty(self):
"""return true if this bag is empty; false otherwise."""
return self._first is None
def size(self):
"""Returns the number of items in this bag."""
return self._N
def add(self, item):
"""Adds the arg item to this bag."""
self._first = self._Node(item, self._first)
self._N += 1
# Returns an iterator that iterates over the items in the bag in arbitrary order.
def __iter__(self):
return self._ListIterator(self._first)
class _ListIterator(object): # <Item> implements Iterator<Item>:
"""an iterator, doesn't implement remove() since it's optional."""
def __init__(self, first):
self._current = first
def hasNext(self):
"""If we are not at the end of the Bag."""
return self._current is not None
def next(self):
"""Go to the next element."""
if not self.hasNext():
raise StopIteration
item = self._current._item
self._current = self._current._next
return item
#************************************************************************
# Compilation: javac Bag.java
# Execution: java Bag < input.txt
#
# A generic bag or multiset, implemented using a singly-linked list.
#
# % more tobe.txt
# to be or not to - be - - that - - - is
#
# % java Bag < tobe.txt
# size of bag = 14
# is
# -
# -
# -
# that
# -
# -
# be
# -
# to
# not
# or
# be
# to
#
#************************************************************************/
# The Bag class represents a bag (or multiset) of generic items.
# It supports insertion and iterating over the
# items in arbitrary order.
#
# This implementation uses a singly-linked list with a static nested class Node.
# See {@link LinkedBag} for the version from the
# textbook that uses a non-static nested class.
# The <em>add</em>, <em>isEmpty</em>, and <em>size</em> operations
# take constant time. Iteration takes time proportional to the number of items.
#
# For additional documentation, see
# <a href="http://algs4.cs.princeton.edu/13stacks">Section 1.3</a> of
# <i>Algorithms, 4th Edition</i> by Robert Sedgewick and Kevin Wayne.
#
# @author Robert Sedgewick
# @author Kevin Wayne
# @converted to Python by DV Klopfenstein
# Copyright (C) 2002-2010, Robert Sedgewick and Kevin Wayne.
# Java last updated: Tue Mar 25 04:52:35 EDT 2014.
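# Minimal usage sketch (illustrative addition, not part of the original port).
# Assumes Python 2, matching the next()-based iterator protocol implemented above.
if __name__ == '__main__':
    bag = Bag()
    for word in "to be or not to be".split():
        bag.add(word)
    print("size of bag = %d" % bag.size()) # -> size of bag = 6
    for elem in bag: # items come back in arbitrary (reverse-insertion) order
        print(elem)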
|
gpl-2.0
|
hgl888/chromium-crosswalk-efl
|
tools/telemetry/telemetry/core/backends/webdriver/webdriver_desktop_browser_finder.py
|
25
|
4101
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop browsers that can be controlled by telemetry."""
import logging
import os
import sys
from telemetry.core import browser
from telemetry.core import platform as platform_module
from telemetry.core import possible_browser
from telemetry.core import util
from telemetry.core.backends.webdriver import webdriver_ie_backend
from telemetry.util import support_binaries
# Try to import the selenium python lib, which may not be available.
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webdriver', 'pylib')
try:
from selenium import webdriver # pylint: disable=F0401
except ImportError:
webdriver = None
class PossibleWebDriverBrowser(possible_browser.PossibleBrowser):
"""A browser that can be controlled through webdriver API."""
def __init__(self, browser_type, finder_options):
target_os = sys.platform.lower()
super(PossibleWebDriverBrowser, self).__init__(browser_type, target_os,
finder_options, False)
assert browser_type in FindAllBrowserTypes(finder_options), \
('Please add %s to webdriver_desktop_browser_finder.FindAllBrowserTypes'
% browser_type)
def CreateWebDriverBackend(self, platform_backend):
raise NotImplementedError()
def _InitPlatformIfNeeded(self):
if self._platform:
return
self._platform = platform_module.GetHostPlatform()
# pylint: disable=W0212
self._platform_backend = self._platform._platform_backend
def Create(self):
self._InitPlatformIfNeeded()
backend = self.CreateWebDriverBackend(self._platform_backend)
return browser.Browser(backend,
self._platform_backend,
self._archive_path,
self._append_to_existing_wpr,
self._make_javascript_deterministic,
self._credentials_path)
def SupportsOptions(self, finder_options):
if len(finder_options.extensions_to_load) != 0:
return False
return True
def UpdateExecutableIfNeeded(self):
pass
@property
def last_modification_time(self):
return -1
class PossibleDesktopIE(PossibleWebDriverBrowser):
def __init__(self, browser_type, finder_options, architecture):
super(PossibleDesktopIE, self).__init__(browser_type, finder_options)
self._architecture = architecture
def CreateWebDriverBackend(self, platform_backend):
assert webdriver
def DriverCreator():
ie_driver_exe = support_binaries.FindPath(
'IEDriverServer_%s' % self._architecture, 'win')
return webdriver.Ie(executable_path=ie_driver_exe)
return webdriver_ie_backend.WebDriverIEBackend(
platform_backend, DriverCreator, self.finder_options.browser_options)
def SelectDefaultBrowser(_):
return None
def FindAllBrowserTypes(_):
if webdriver:
return [
'internet-explorer',
'internet-explorer-x64']
else:
logging.warning('Webdriver backend is unsupported without selenium pylib. '
'For installation of selenium pylib, please refer to '
'https://code.google.com/p/selenium/wiki/PythonBindings.')
return []
def FindAllAvailableBrowsers(finder_options):
"""Finds all the desktop browsers available on this machine."""
browsers = []
if not webdriver:
return browsers
# Look for the IE browser in the standard location.
if sys.platform.startswith('win'):
ie_path = os.path.join('Internet Explorer', 'iexplore.exe')
search_paths = (
(32, os.getenv('PROGRAMFILES(X86)'), 'internet-explorer'),
(64, os.getenv('PROGRAMFILES'), 'internet-explorer-x64'),
)
for architecture, search_path, browser_type in search_paths:
if not search_path:
continue
if os.path.exists(os.path.join(search_path, ie_path)):
browsers.append(
PossibleDesktopIE(browser_type, finder_options, architecture))
return browsers
|
bsd-3-clause
|
zdary/intellij-community
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_ws_comma.py
|
334
|
1095
|
"""Fixer that changes 'a ,b' into 'a, b'.
This also changes '{a :b}' into '{a: b}', but does not touch other
uses of colons. It does not touch other uses of whitespace.
"""
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
class FixWsComma(fixer_base.BaseFix):
    explicit = True # The user must ask for this fixer
PATTERN = """
any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
"""
COMMA = pytree.Leaf(token.COMMA, u",")
COLON = pytree.Leaf(token.COLON, u":")
SEPS = (COMMA, COLON)
def transform(self, node, results):
new = node.clone()
comma = False
for child in new.children:
if child in self.SEPS:
prefix = child.prefix
if prefix.isspace() and u"\n" not in prefix:
child.prefix = u""
comma = True
else:
if comma:
prefix = child.prefix
if not prefix:
child.prefix = u" "
comma = False
return new
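# Illustrative effect (hypothetical input, added for clarity): running 2to3 with this
# fixer enabled rewrites "f(a ,b ,c)" to "f(a, b, c)" and "{a :b}" to "{a: b}";
# separators whose prefix contains a newline keep their original whitespace.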
|
apache-2.0
|
austgl/shadowsocks
|
shadowsocks/shell.py
|
270
|
12676
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if 'server_port' in config and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s',
e.message)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', 8388)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
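# Illustrative round trip (made-up config snippet, added for clarity):
# parse_json_in_str('{"server": "example.com", "server_port": 8388}') yields a dict
# whose string values were converted to UTF-8 byte strings by _decode_dict() above,
# which is what the rest of the code (e.g. to_bytes()/encrypt) expects on Python 2.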
|
apache-2.0
|
mattclay/ansible
|
lib/ansible/plugins/shell/powershell.py
|
29
|
11352
|
# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: powershell
version_added: historical
short_description: Windows PowerShell
description:
- The only option when using 'winrm' or 'psrp' as a connection plugin.
- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
extends_documentation_fragment:
- shell_windows
'''
import base64
import os
import re
import shlex
import pkgutil
import xml.etree.ElementTree as ET
import ntpath
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.shell import ShellBase
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
def _parse_clixml(data, stream="Error"):
"""
Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
message encoded in the XML data. CLIXML is used by PowerShell to encode
multiple objects in stderr.
"""
lines = []
# There are some scenarios where the stderr contains a nested CLIXML element like
    # '#< CLIXML\r\n#< CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
# Parse each individual <Objs> element and add the error strings to our stderr list.
# https://github.com/ansible/ansible/issues/69550
while data:
end_idx = data.find(b"</Objs>") + 7
current_element = data[data.find(b"<Objs "):end_idx]
data = data[end_idx:]
clixml = ET.fromstring(current_element)
namespace_match = re.match(r'{(.*)}', clixml.tag)
namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
strings = clixml.findall("./%sS" % namespace)
lines.extend([e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream])
return to_bytes('\r\n'.join(lines))
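# Illustrative example (made-up payload, not from the Ansible test suite):
#
#     data = (b"#< CLIXML\r\n"
#             b'<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
#             b'<S S="Error">Oops_x000D__x000A_</S></Objs>')
#     _parse_clixml(data) # -> b'Oops'
#
# The while loop above slices one <Objs> element per iteration, so nested or
# concatenated CLIXML documents are handled the same way.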
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
# TODO: add binary module support
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
        # use normpath() to remove doubled slashes and convert forward slashes to backslashes
parts = [ntpath.normpath(self._unquote(arg)) for arg in args]
        # Because ntpath.join treats any component that begins with a backslash as an absolute path,
# we have to strip slashes from at least the beginning, otherwise join will ignore all previous
# path components except for the drive.
return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
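    # Illustrative behaviour (hypothetical arguments, added for clarity):
    # join_path('C:\\temp', 'a/b') normalises each part with ntpath.normpath() and
    # yields 'C:\\temp\\a\\b'; a component quoted as "'C:\\temp'" is first stripped
    # of its surrounding quotes by _unquote().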
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item '%s' -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item '%s' -Force;''' % path)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
basefile = self._escape(self._unquote(basefile))
basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
script = '''
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
Write-Output -InputObject $tmp.FullName
''' % (basetmpdir, basefile)
return self._encode_script(script.strip())
def expand_user(self, user_home_path, username=''):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
else:
script = "Write-Output '%s'" % self._escape(user_home_path)
return self._encode_script(script)
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path '%s')
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output '$res';
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf '%(path)s')
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container '%(path)s')
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
# we're running a module via the bootstrap wrapper
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s; exit $LASTEXITCODE' % cmd
def _unquote(self, value):
'''Remove any matching quotes that wrap the given value.'''
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
def _escape(self, value):
'''Return value escaped for use in PowerShell single quotes.'''
# There are 5 chars that need to be escaped in a single quote.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
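    # Illustrative escape (hypothetical value, added for clarity): _escape(u"it's")
    # returns u"it''s", doubling the quote as required inside a PowerShell
    # single-quoted literal; the Unicode smart-quote variants are doubled the same way.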
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = to_text(script)
if script == u'-':
cmd_parts = _common_args + ['-Command', '-']
else:
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
% script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
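    # Illustrative output shape (hypothetical script, added for clarity):
    # _encode_script("Get-Date") returns a string of the form
    #     PowerShell -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -EncodedCommand <base64>
    # where <base64> is the base64 form of the UTF-16-LE encoded script after the
    # strict-mode and exit-code wrappers above have been applied.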
|
gpl-3.0
|
alikins/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
|
49
|
9858
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <darren@iweb.co.uk>
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_loadbalancer_rule_member
short_description: Manages load balancer rule members on Apache CloudStack based clouds.
description:
- Add and remove load balancer rule members.
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "RenΓ© Moser (@resmo)"
options:
name:
description:
- The name of the load balancer rule.
required: true
ip_address:
description:
- Public IP address from where the network traffic will be load balanced from.
- Only needed to find the rule if C(name) is not unique.
required: false
default: null
aliases: [ 'public_ip' ]
vms:
description:
- List of VMs to assign to or remove from the rule.
required: true
aliases: [ 'vm' ]
state:
description:
- Should the VMs be present or absent from the rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
project:
description:
- Name of the project the firewall rule is related to.
required: false
default: null
domain:
description:
- Domain the rule is related to.
required: false
default: null
account:
description:
- Account the rule is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the rule should be located.
- If not set, default zone is used.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Add VMs to an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
# Remove a VM from an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
state: absent
# Rolling upgrade of hosts
- hosts: webservers
serial: 1
pre_tasks:
- name: Remove from load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: absent
tasks:
# Perform update
post_tasks:
- name: Add to load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: present
'''
RETURN = '''
---
id:
description: UUID of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
zone:
description: Name of zone the rule is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the rule is related to.
returned: success
type: string
sample: Production
account:
description: Account the rule is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the rule is related to.
returned: success
type: string
sample: example domain
algorithm:
description: Load balancer algorithm used.
returned: success
type: string
sample: "source"
cidr:
description: CIDR to forward traffic from.
returned: success
type: string
sample: ""
name:
description: Name of the rule.
returned: success
type: string
sample: "http-lb"
description:
description: Description of the rule.
returned: success
type: string
sample: "http load balancer rule"
protocol:
description: Protocol of the rule.
returned: success
type: string
sample: "tcp"
public_port:
description: Public port.
returned: success
type: string
sample: 80
private_port:
  description: Private port.
returned: success
type: string
sample: 80
public_ip:
description: Public IP address.
returned: success
type: string
sample: "1.2.3.4"
vms:
description: Rule members.
returned: success
type: list
sample: '[ "web01", "web02" ]'
tags:
description: List of resource tags associated with the rule.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
state:
description: State of the rule.
returned: success
type: string
sample: "Add"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackLBRuleMember(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackLBRuleMember, self).__init__(module)
self.returns = {
'publicip': 'public_ip',
'algorithm': 'algorithm',
'cidrlist': 'cidr',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'privateport': 'private_port',
}
def get_rule(self):
args = self._get_common_args()
args.update({
'name': self.module.params.get('name'),
'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None,
})
if self.module.params.get('ip_address'):
args['publicipid'] = self.get_ip_address(key='id')
rules = self.query_api('listLoadBalancerRules', **args)
if rules:
if len(rules['loadbalancerrule']) > 1:
self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name'])
return rules['loadbalancerrule'][0]
return None
def _get_common_args(self):
return {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
def _get_members_of_rule(self, rule):
res = self.query_api('listLoadBalancerRuleInstances', id=rule['id'])
return res.get('loadbalancerruleinstance', [])
def _ensure_members(self, operation):
if operation not in ['add', 'remove']:
self.module.fail_json(msg="Bad operation: %s" % operation)
rule = self.get_rule()
if not rule:
self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name'))
existing = {}
for vm in self._get_members_of_rule(rule=rule):
existing[vm['name']] = vm['id']
wanted_names = self.module.params.get('vms')
if operation == 'add':
cs_func = self.cs.assignToLoadBalancerRule
to_change = set(wanted_names) - set(existing.keys())
else:
cs_func = self.cs.removeFromLoadBalancerRule
to_change = set(wanted_names) & set(existing.keys())
if not to_change:
return rule
args = self._get_common_args()
vms = self.query_api('listVirtualMachines', **args)
to_change_ids = []
for name in to_change:
for vm in vms.get('virtualmachine', []):
if vm['name'] == name:
to_change_ids.append(vm['id'])
break
else:
self.module.fail_json(msg="Unknown VM: %s" % name)
if to_change_ids:
self.result['changed'] = True
if to_change_ids and not self.module.check_mode:
res = cs_func(
id=rule['id'],
virtualmachineids=to_change_ids,
)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res)
rule = self.get_rule()
return rule
def add_members(self):
return self._ensure_members('add')
def remove_members(self):
return self._ensure_members('remove')
def get_result(self, rule):
super(AnsibleCloudStackLBRuleMember, self).get_result(rule)
if rule:
self.result['vms'] = []
for vm in self._get_members_of_rule(rule=rule):
self.result['vms'].append(vm['name'])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
ip_address=dict(aliases=['public_ip']),
vms=dict(required=True, aliases=['vm'], type='list'),
state=dict(choices=['present', 'absent'], default='present'),
zone=dict(),
domain=dict(),
project=dict(),
account=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module)
state = module.params.get('state')
if state in ['absent']:
rule = acs_lb_rule_member.remove_members()
else:
rule = acs_lb_rule_member.add_members()
result = acs_lb_rule_member.get_result(rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
timopulkkinen/BubbleFish
|
chrome/test/functional/tracing/timeline_model.py
|
69
|
1823
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
class TimelineModel(object):
"""A proxy for about:tracing's TimelineModel class.
Test authors should never need to know that this class is a proxy.
"""
@staticmethod
def _EscapeForQuotedJavascriptExecution(js):
# Poor man's string escape.
    return js.replace('\'', '\\\'')
def __init__(self, js_executor, shim_id):
self._js_executor = js_executor
self._shim_id = shim_id
# Warning: The JSON serialization process removes cyclic references.
# TODO(eatnumber): regenerate these cyclic references on deserialization.
def _CallModelMethod(self, method_name, *args):
result = self._js_executor(
"""window.timelineModelShims['%s'].invokeMethod('%s', '%s')""" % (
self._shim_id,
self._EscapeForQuotedJavascriptExecution(method_name),
self._EscapeForQuotedJavascriptExecution(json.dumps(args))
)
)
if result['success']:
return result['data']
# TODO(eatnumber): Make these exceptions more reader friendly.
raise RuntimeError(result)
def __del__(self):
self._js_executor("""
window.timelineModelShims['%s'] = undefined;
window.domAutomationController.send('');
""" % self._shim_id)
def GetAllThreads(self):
return self._CallModelMethod('getAllThreads')
def GetAllCpus(self):
return self._CallModelMethod('getAllCpus')
def GetAllProcesses(self):
return self._CallModelMethod('getAllProcesses')
def GetAllCounters(self):
return self._CallModelMethod('getAllCounters')
def FindAllThreadsNamed(self, name):
    return self._CallModelMethod('findAllThreadsNamed', name)
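# Illustrative usage (a minimal sketch; assumes the tracing test harness
# supplies a js_executor callable that evaluates JavaScript in the
# about:tracing page, plus the id of a previously created
# window.timelineModelShims entry; the shim id shown is a placeholder):
#
#   model = TimelineModel(js_executor, 'shim-0')
#   threads = model.GetAllThreads()
#   counters = model.GetAllCounters()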
|
bsd-3-clause
|
siberider/androguard
|
tests/test_types.py
|
38
|
3683
|
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2010, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.androgen import AndroguardS
from androguard.core.analysis import analysis
#TEST_CASE = 'examples/android/TC/bin/classes.dex'
TEST_CASE = 'examples/android/TestsAndroguard/bin/classes.dex'
VALUES_ = { "Lorg/t0t0/androguard/TC/TestType1; <init> ()V" : [
42,
-42,
0,
42,
-42,
0,
42.0,
-42.0,
0.0,
42.0,
-42.0,
0.0,
],
}
VALUES = { 'Ltests/androguard/TestActivity; testDouble ()V' : [
-5.0,
-4,
-3,
-2,
-1,
0,
1,
2,
3,
4,
5,
-5,
-4,
-3,
-2,
-1,
0,
1,
2,
3,
4,
5,
-5,
-4,
-3,
-2,
-1,
0,
1,
2,
3,
4,
5,
65534,
65535,
65536,
65537,
32769,
32768,
32767,
32766,
65534,
65535,
65536,
65537,
32769,
32768,
32767,
32766,
65534,
65535,
65536,
65537,
32769,
32768,
32767,
32766,
5346952,
5346952,
5346952,
65534.5,
65535.5,
65536.5,
65537.5,
32769.5,
32768.5,
32767.5,
32766.5,
65534.5,
65535.5,
65536.5,
65537.5,
32769.5,
32768.5,
32767.5,
32766.5,
-5,
-65535,
-65536,
-123456789123456789.555555555,
-123456789123456789.555555555,
-606384730,
-123456790519087104,
3.5
],
}
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
a = AndroguardS( TEST_CASE )
for method in a.get_methods() :
key = method.get_class_name() + " " + method.get_name() + " " + method.get_descriptor()
if key not in VALUES :
continue
print method.get_class_name(), method.get_name(), method.get_descriptor()
code = method.get_code()
bc = code.get_bc()
idx = 0
for i in bc.get() :
#print "\t", "%x" % idx, i.get_name(), i.get_operands()
if "const" in i.get_name() :
formatted_operands = i.get_formatted_operands()
for f in formatted_operands :
# print i.get_name(), i.get_operands(), i.get_formatted_operands()
test( f[1], VALUES[ key ].pop(0) )
idx += i.get_length()
|
apache-2.0
|
RacerXx/GoAtThrottleUp
|
ServerRelay/cherrypy/tutorial/tut09_files.py
|
33
|
3596
|
"""
Tutorial: File upload and download
Uploads
-------
When a client uploads a file to a CherryPy application, it's placed
on disk immediately. CherryPy will pass it to your exposed method
as an argument (see "myFile" below); that arg will have a "file"
attribute, which is a handle to the temporary uploaded file.
If you wish to permanently save the file, you need to read()
from myFile.file and write() somewhere else.
Note the use of 'enctype="multipart/form-data"' and 'input type="file"'
in the HTML which the client uses to upload the file.
Downloads
---------
If you wish to send a file to the client, you have two options:
First, you can simply return a file-like object from your page handler.
CherryPy will read the file and serve it as the content (HTTP body)
of the response. However, that doesn't tell the client that
the response is a file to be saved, rather than displayed.
Use cherrypy.lib.static.serve_file for that; it takes four
arguments:
serve_file(path, content_type=None, disposition=None, name=None)
Set "name" to the filename that you expect clients to use when they save
your file. Note that the "name" argument is ignored if you don't also
provide a "disposition" (usually "attachement"). You can manually set
"content_type", but be aware that if you also use the encoding tool, it
may choke if the file extension is not recognized as belonging to a known
Content-Type. Setting the content_type to "application/x-download" works
in most cases, and should prompt the user with an Open/Save dialog in
popular browsers.
"""
import os
localDir = os.path.dirname(__file__)
absDir = os.path.join(os.getcwd(), localDir)
import cherrypy
from cherrypy.lib import static
class FileDemo(object):
def index(self):
return """
<html><body>
<h2>Upload a file</h2>
<form action="upload" method="post" enctype="multipart/form-data">
filename: <input type="file" name="myFile" /><br />
<input type="submit" />
</form>
<h2>Download a file</h2>
<a href='download'>This one</a>
</body></html>
"""
index.exposed = True
def upload(self, myFile):
out = """<html>
<body>
myFile length: %s<br />
myFile filename: %s<br />
myFile mime-type: %s
</body>
</html>"""
# Although this just counts the file length, it demonstrates
# how to read large files in chunks instead of all at once.
# CherryPy reads the uploaded file into a temporary file;
# myFile.file.read reads from that.
size = 0
while True:
data = myFile.file.read(8192)
if not data:
break
size += len(data)
return out % (size, myFile.filename, myFile.content_type)
upload.exposed = True
def download(self):
path = os.path.join(absDir, "pdf_file.pdf")
return static.serve_file(path, "application/x-download",
"attachment", os.path.basename(path))
download.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to FileDemo().index().
cherrypy.quickstart(FileDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(FileDemo(), config=tutconf)
|
mit
|
GustavoHennig/ansible
|
test/integration/cleanup_ec2.py
|
66
|
6768
|
'''
Find and delete AWS resources matching the provided --match string. Unless
--yes|-y is provided, you will be prompted for confirmation prior to deleting
resources. Please use caution: you can easily delete your *ENTIRE* EC2
infrastructure.
'''
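# Example invocation (illustrative; adjust the match regex and credentials file
# to your environment):
#
#   python cleanup_ec2.py --credentials credentials.yml \
#       --match '^ansible-testing-' --yes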
import os
import re
import sys
import boto
import optparse
import yaml
import os.path
import boto.ec2.elb
import time
def delete_aws_resources(get_func, attr, opts):
for item in get_func():
val = getattr(item, attr)
if re.search(opts.match_re, val):
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def delete_autoscaling_group(get_func, attr, opts):
assumeyes = opts.assumeyes
group_name = None
for item in get_func():
group_name = getattr(item, attr)
if re.search(opts.match_re, group_name):
if not opts.assumeyes:
assumeyes = raw_input("Delete matching %s? [y/n]: " % (item).lower()) == 'y'
break
if assumeyes and group_name:
groups = asg.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = asg.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(asg.get_all_groups(names=[group_name])):
time.sleep(5)
print ("Terminated ASG: %s" % group_name)
def delete_aws_eips(get_func, attr, opts):
# the file might not be there if the integration test wasn't run
try:
eip_log = open(opts.eip_log, 'r').read().splitlines()
except IOError:
print('%s not found.' % opts.eip_log)
return
for item in get_func():
val = getattr(item, attr)
if val in eip_log:
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def delete_aws_instances(reservation, opts):
for list in reservation:
for item in list.instances:
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def prompt_and_delete(item, prompt, assumeyes):
if not assumeyes:
assumeyes = raw_input(prompt).lower() == 'y'
assert hasattr(item, 'delete') or hasattr(item, 'terminate') , "Class <%s> has no delete or terminate attribute" % item.__class__
if assumeyes:
if hasattr(item, 'delete'):
item.delete()
print ("Deleted %s" % item)
if hasattr(item, 'terminate'):
item.terminate()
print ("Terminated %s" % item)
def parse_args():
# Load details from credentials.yml
default_aws_access_key = os.environ.get('AWS_ACCESS_KEY', None)
default_aws_secret_key = os.environ.get('AWS_SECRET_KEY', None)
if os.path.isfile('credentials.yml'):
credentials = yaml.load(open('credentials.yml', 'r'))
if default_aws_access_key is None:
default_aws_access_key = credentials['ec2_access_key']
if default_aws_secret_key is None:
default_aws_secret_key = credentials['ec2_secret_key']
parser = optparse.OptionParser(usage="%s [options]" % (sys.argv[0],),
description=__doc__)
parser.add_option("--access",
action="store", dest="ec2_access_key",
default=default_aws_access_key,
help="Amazon ec2 access id. Can use EC2_ACCESS_KEY environment variable, or a values from credentials.yml.")
parser.add_option("--secret",
action="store", dest="ec2_secret_key",
default=default_aws_secret_key,
help="Amazon ec2 secret key. Can use EC2_SECRET_KEY environment variable, or a values from credentials.yml.")
parser.add_option("--eip-log",
action="store", dest="eip_log",
default = None,
help = "Path to log of EIPs created during test.")
parser.add_option("--integration-config",
action="store", dest="int_config",
default = "integration_config.yml",
help = "path to integration config")
parser.add_option("--credentials", "-c",
action="store", dest="credential_file",
default="credentials.yml",
help="YAML file to read cloud credentials (default: %default)")
parser.add_option("--yes", "-y",
action="store_true", dest="assumeyes",
default=False,
help="Don't prompt for confirmation")
parser.add_option("--match",
action="store", dest="match_re",
default="^ansible-testing-",
help="Regular expression used to find AWS resources (default: %default)")
(opts, args) = parser.parse_args()
for required in ['ec2_access_key', 'ec2_secret_key']:
if getattr(opts, required) is None:
parser.error("Missing required parameter: --%s" % required)
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
int_config = yaml.load(open(opts.int_config).read())
if not opts.eip_log:
output_dir = os.path.expanduser(int_config["output_dir"])
opts.eip_log = output_dir + '/' + opts.match_re.replace('^','') + '-eip_integration_tests.log'
# Connect to AWS
aws = boto.connect_ec2(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
elb = boto.connect_elb(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
asg = boto.connect_autoscale(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
try:
# Delete matching keys
delete_aws_resources(aws.get_all_key_pairs, 'name', opts)
# Delete matching security groups
delete_aws_resources(aws.get_all_security_groups, 'name', opts)
# Delete matching ASGs
delete_autoscaling_group(asg.get_all_groups, 'name', opts)
# Delete matching launch configs
delete_aws_resources(asg.get_all_launch_configurations, 'name', opts)
# Delete ELBs
delete_aws_resources(elb.get_all_load_balancers, 'name', opts)
# Delete recorded EIPs
delete_aws_eips(aws.get_all_addresses, 'public_ip', opts)
# Delete temporary instances
filters = {"tag:Name":opts.match_re.replace('^',''), "instance-state-name": ['running', 'pending', 'stopped' ]}
delete_aws_instances(aws.get_all_instances(filters=filters), opts)
except KeyboardInterrupt as e:
print("\nExiting on user command.")
|
gpl-3.0
|
hantek/fuel
|
tests/test_serialization.py
|
25
|
1403
|
import os
import tempfile
import numpy
from six.moves import cPickle
from fuel.streams import DataStream
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from tests import skip_if_not_available
def test_in_memory():
skip_if_not_available(datasets=['mnist.hdf5'])
# Load MNIST and get two batches
mnist = MNIST(('train',), load_in_memory=True)
data_stream = DataStream(mnist, iteration_scheme=SequentialScheme(
examples=mnist.num_examples, batch_size=256))
epoch = data_stream.get_epoch_iterator()
for i, (features, targets) in enumerate(epoch):
if i == 1:
break
handle = mnist.open()
known_features, _ = mnist.get_data(handle, slice(256, 512))
mnist.close(handle)
assert numpy.all(features == known_features)
# Pickle the epoch and make sure that the data wasn't dumped
with tempfile.NamedTemporaryFile(delete=False) as f:
filename = f.name
cPickle.dump(epoch, f)
assert os.path.getsize(filename) < 1024 * 1024 # Less than 1MB
# Reload the epoch and make sure that the state was maintained
del epoch
with open(filename, 'rb') as f:
epoch = cPickle.load(f)
features, targets = next(epoch)
handle = mnist.open()
known_features, _ = mnist.get_data(handle, slice(512, 768))
mnist.close(handle)
assert numpy.all(features == known_features)
|
mit
|
matthewlent/ng-boilerplate-flask
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
|
1835
|
1661
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
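# Illustrative usage (a minimal sketch; assumes html5lib's "etree" tree builder
# and walker, and a namespace-aware SAX handler such as
# xml.sax.saxutils.XMLGenerator):
#
#   import html5lib
#   from xml.sax.saxutils import XMLGenerator
#
#   tree = html5lib.parse("<p>Hello</p>", treebuilder="etree")
#   walker = html5lib.getTreeWalker("etree")(tree)
#   to_sax(walker, XMLGenerator())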
|
mit
|
joone/chromium-crosswalk
|
tools/grit/grit/format/policy_templates/writers/adml_writer.py
|
41
|
7710
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from xml.dom import minidom
from grit.format.policy_templates.writers import xml_formatted_writer
def GetWriter(config):
  '''Factory method for instantiating the ADMLWriter. Every Writer needs a
GetWriter method because the TemplateFormatter uses this method to
instantiate a Writer.
'''
return ADMLWriter(['win'], config)
class ADMLWriter(xml_formatted_writer.XMLFormattedWriter):
''' Class for generating an ADML policy template. It is used by the
PolicyTemplateGenerator to write the ADML file.
'''
# DOM root node of the generated ADML document.
_doc = None
# The string-table contains all ADML "string" elements.
_string_table_elem = None
  # The presentation-table is the container for the presentation elements that
# describe the presentation of Policy-Groups and Policies.
_presentation_table_elem = None
def _AddString(self, id, text):
''' Adds an ADML "string" element to _string_table_elem. The following
ADML snippet contains an example:
<string id="$(id)">$(text)</string>
Args:
id: ID of the newly created "string" element.
text: Value of the newly created "string" element.
'''
id = id.replace('.', '_')
if id in self.strings_seen:
assert text == self.strings_seen[id]
else:
self.strings_seen[id] = text
string_elem = self.AddElement(
self._string_table_elem, 'string', {'id': id})
string_elem.appendChild(self._doc.createTextNode(text))
def WritePolicy(self, policy):
'''Generates the ADML elements for a Policy.
<stringTable>
...
<string id="$(policy_group_name)">$(caption)</string>
<string id="$(policy_group_name)_Explain">$(description)</string>
</stringTable>
<presentationTables>
...
<presentation id=$(policy_group_name)/>
</presentationTables>
Args:
policy: The Policy to generate ADML elements for.
'''
policy_type = policy['type']
policy_name = policy['name']
if 'caption' in policy:
policy_caption = policy['caption']
else:
policy_caption = policy_name
if 'desc' in policy:
policy_description = policy['desc']
else:
policy_description = policy_name
if 'label' in policy:
policy_label = policy['label']
else:
policy_label = policy_name
self._AddString(policy_name, policy_caption)
self._AddString(policy_name + '_Explain', policy_description)
presentation_elem = self.AddElement(
self._presentation_table_elem, 'presentation', {'id': policy_name})
if policy_type == 'main':
pass
elif policy_type in ('string', 'dict'):
# 'dict' policies are configured as JSON-encoded strings on Windows.
textbox_elem = self.AddElement(presentation_elem, 'textBox',
{'refId': policy_name})
label_elem = self.AddElement(textbox_elem, 'label')
label_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type == 'int':
textbox_elem = self.AddElement(presentation_elem, 'decimalTextBox',
{'refId': policy_name})
textbox_elem.appendChild(self._doc.createTextNode(policy_label + ':'))
elif policy_type in ('int-enum', 'string-enum'):
for item in policy['items']:
self._AddString(item['name'], item['caption'])
dropdownlist_elem = self.AddElement(presentation_elem, 'dropdownList',
{'refId': policy_name})
dropdownlist_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type in ('list', 'string-enum-list'):
self._AddString(policy_name + 'Desc', policy_caption)
listbox_elem = self.AddElement(presentation_elem, 'listBox',
{'refId': policy_name + 'Desc'})
listbox_elem.appendChild(self._doc.createTextNode(policy_label))
elif policy_type == 'group':
pass
elif policy_type == 'external':
# This type can only be set through cloud policy.
pass
else:
raise Exception('Unknown policy type %s.' % policy_type)
def BeginPolicyGroup(self, group):
'''Generates ADML elements for a Policy-Group. For each Policy-Group two
ADML "string" elements are added to the string-table. One contains the
caption of the Policy-Group and the other a description. A Policy-Group also
requires an ADML "presentation" element that must be added to the
presentation-table. The "presentation" element is the container for the
    elements that define the visual presentation of the Policy-Group's Policies.
The following ADML snippet shows an example:
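      <stringTable>
        ...
        <string id="$(policy_group_name)_group">$(caption)</string>
      </stringTable>
    (Illustrative snippet: the id is the group's name with a '_group' suffix and
    the text is the group's caption, as added by the code below.)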
Args:
group: The Policy-Group to generate ADML elements for.
'''
# Add ADML "string" elements to the string-table that are required by a
# Policy-Group.
self._AddString(group['name'] + '_group', group['caption'])
def _AddBaseStrings(self, build):
''' Adds ADML "string" elements to the string-table that are referenced by
the ADMX file but not related to any specific Policy-Group or Policy.
'''
self._AddString(self.config['win_supported_os'],
self.messages['win_supported_winxpsp2']['text'])
recommended_name = '%s - %s' % \
(self.config['app_name'], self.messages['doc_recommended']['text'])
if build == 'chrome':
self._AddString(self.config['win_mandatory_category_path'][0],
'Google')
self._AddString(self.config['win_mandatory_category_path'][1],
self.config['app_name'])
self._AddString(self.config['win_recommended_category_path'][1],
recommended_name)
elif build == 'chromium':
self._AddString(self.config['win_mandatory_category_path'][0],
self.config['app_name'])
self._AddString(self.config['win_recommended_category_path'][0],
recommended_name)
def BeginTemplate(self):
dom_impl = minidom.getDOMImplementation('')
self._doc = dom_impl.createDocument(None, 'policyDefinitionResources',
None)
if self._GetChromiumVersionString() is not None:
self.AddComment(self._doc.documentElement, self.config['build'] + \
' version: ' + self._GetChromiumVersionString())
policy_definitions_resources_elem = self._doc.documentElement
policy_definitions_resources_elem.attributes['revision'] = '1.0'
policy_definitions_resources_elem.attributes['schemaVersion'] = '1.0'
self.AddElement(policy_definitions_resources_elem, 'displayName')
self.AddElement(policy_definitions_resources_elem, 'description')
resources_elem = self.AddElement(policy_definitions_resources_elem,
'resources')
self._string_table_elem = self.AddElement(resources_elem, 'stringTable')
self._AddBaseStrings(self.config['build'])
self._presentation_table_elem = self.AddElement(resources_elem,
'presentationTable')
def Init(self):
# Map of all strings seen.
self.strings_seen = {}
def GetTemplateText(self):
# Using "toprettyxml()" confuses the Windows Group Policy Editor
# (gpedit.msc) because it interprets whitespace characters in text between
# the "string" tags. This prevents gpedit.msc from displaying the category
# names correctly.
# TODO(markusheintz): Find a better formatting that works with gpedit.
return self._doc.toxml()
|
bsd-3-clause
|
Parcks/core
|
test/service/test_post_installation_facade.py
|
1
|
3532
|
"""
Scriptable Packages Installer - Parcks
Copyright (C) 2017 JValck - Setarit
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Setarit - parcks[at]setarit.com
"""
from __future__ import absolute_import
import json
from src.domain.log.logger import Logger
from src.domain.model.post_install.shell import Shell
from src.domain.model.post_install.shell_command import ShellCommand
from src.service.post_installation_facade import PostInstallationFacade
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class TestPostInstallationFacade(unittest.TestCase):
def setUp(self):
self.create_facade()
self.create_json_object()
self.create_json_array()
Logger.disable_all()
def tearDown(self):
Logger.enable()
def create_facade(self):
self.facade_under_test = PostInstallationFacade([
Shell("Dummy", [ShellCommand(["ls"])]),
Shell("Dummy", [ShellCommand(["ls"])])
])
def create_json_object(self):
Json = """\
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
}
"""
self.json_object = json.loads(Json)
def create_json_array(self):
Json = """\
[
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
},
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
}
]
"""
self.json_list = json.loads(Json)
@patch.object(Shell, 'run')
def test_handle_post_installation_calls_run_on_PostInstallRunnable(self, mock):
self.facade_under_test.handle_post_installation()
self.assertEqual(2, mock.call_count)
def test_from_json_creates_facade_with_list_if_json_list(self):
facade = PostInstallationFacade.from_json(self.json_list)
self.assertTrue(isinstance(facade.post_install_runnables, list))
def test_from_json_creates_facade_with_list_if_json_object(self):
facade = PostInstallationFacade.from_json(self.json_object)
self.assertTrue(isinstance(facade.post_install_runnables, list))
@patch.object(Shell, 'run')
def test_handle_post_installalation_calls_run_on_PostInstallRunnable_if_one_object(self, mock):
facade = PostInstallationFacade.from_json(self.json_object)
facade.handle_post_installation()
self.assertEqual(1, mock.call_count)
|
gpl-2.0
|
M32Media/redash
|
redash/tasks/queries.py
|
1
|
24668
|
import json
import time
import logging
import signal
import redis
import re
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings, utils
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import InterruptException
from .alerts import check_alerts_for_query
from redash.authentication.account import send_api_token
logger = get_task_logger(__name__)
@celery.task(name="redash.tasks.refresh_selected_queries")
def refresh_selected_queries(
months, publishers, global_queries=False, non_monthly_publisher_queries=False,
no_query_execution=False):
outdated_queries_count = 0
query_ids = []
all_dashboards = models.Dashboard.query.all()
dashboard_ids_names = [
(db.id, db.name) for db in all_dashboards
if (publishers == ['ALL'] or any(publisher == db.name.split(':')[0] for publisher in publishers))]
if global_queries:
dashboard_ids_names += [(db.id, db.name) for db in all_dashboards if db.name.split(':')[0] == 'Global']
jobs = []
# An example of Dashboard is Cogeco:unsold:stats or Cogeco:segment:profile_referrer
for db_id, db_name in dashboard_ids_names:
dashboard = models.Dashboard.get_by_id(db_id)
layout_list = [widget_id for row in json.loads(dashboard.layout) for widget_id in row]
widgets = [models.Widget.get_by_id(widget_id) for widget_id in layout_list if not widget_id < 0]
# Some widgets are None objects, and this makes the script fail
widgets = [widget for widget in widgets if widget]
for widget in widgets:
condition = widget.visualization != None and any(month in widget.visualization.name for month in months)
if non_monthly_publisher_queries:
# If the flag is True, add the queries where the pattern DDDDDD, with D being a digit, is not present in the query
# This adds everything that is not month dependent to the query list
# e.g. Cogeco:segment:profile_referrer:view_cogeco, Global:Intell:AdManager:view_last_6m
condition = condition or (not re.findall(r'_(\d{6})', widget.visualization.name))
if global_queries:
condition = condition or db_name.split(':')[0] == 'Global'
if condition:
query_id = widget.visualization.query_rel.id
query = models.Query.get_by_id(query_id)
# If no_query_execution flag is enabled, the query is not run and we only return the query text
if no_query_execution:
jobs.append({
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
else:
jobs.append({
'task': enqueue_query(
query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}).to_dict(),
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
query_ids.append(query.id)
outdated_queries_count += 1
logger.info(jobs)
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset(
'redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
return jobs
"""
Gets tasks associated with ids
"""
def get_tasks(ids):
tasks = {}
    lists = QueryTaskTracker.ALL_LISTS
#Each different list of query trackers
for _list in lists:
#Each different tracker inside a list
for tracker in QueryTaskTracker.all(_list):
for _id in ids:
#Check if id given is matching a tracker
if(_id == tracker.task_id):
qt = QueryTask(tracker.task_id)
data = qt.to_dict()
tasks[tracker.task_id] = {
"status" : data.get('status', None),
"query_id" : tracker.query_id,
"query_result_id" : data.get('query_result_id', None),
"error": data.get('error', None)
}
    return tasks
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
redis_connection.delete(_job_lock_id(query_hash, data_source_id))
# TODO:
# There is some duplication between this class and QueryTask, but I wanted to implement the monitoring features without
# many changes to the existing code, so I ended up creating another object. In the future we can merge them.
class QueryTaskTracker(object):
DONE_LIST = 'query_task_trackers:done'
WAITING_LIST = 'query_task_trackers:waiting'
IN_PROGRESS_LIST = 'query_task_trackers:in_progress'
ALL_LISTS = (DONE_LIST, WAITING_LIST, IN_PROGRESS_LIST)
def __init__(self, data):
self.data = data
@classmethod
def create(cls, task_id, state, query_hash, data_source_id, scheduled, metadata):
data = dict(task_id=task_id, state=state,
query_hash=query_hash, data_source_id=data_source_id,
scheduled=scheduled,
username=metadata.get('Username', 'unknown'),
query_id=metadata.get('Query ID', 'unknown'),
retries=0,
scheduled_retries=0,
created_at=time.time(),
started_at=None,
run_time=None)
return cls(data)
def save(self, connection=None):
if connection is None:
connection = redis_connection
self.data['updated_at'] = time.time()
key_name = self._key_name(self.data['task_id'])
connection.set(key_name, utils.json_dumps(self.data))
connection.zadd(self._get_list(), time.time(), key_name)
for l in self.ALL_LISTS:
if l != self._get_list():
connection.zrem(l, key_name)
    # TODO: this is not thread/concurrency safe. In current code this is not an issue, but better to fix this.
def update(self, **kwargs):
self.data.update(kwargs)
self.save()
@staticmethod
def _key_name(task_id):
return 'query_task_tracker:{}'.format(task_id)
def _get_list(self):
if self.state in ('finished', 'failed', 'cancelled'):
return self.DONE_LIST
        if self.state in ('created',):
return self.WAITING_LIST
return self.IN_PROGRESS_LIST
@classmethod
def get_by_task_id(cls, task_id, connection=None):
if connection is None:
connection = redis_connection
key_name = cls._key_name(task_id)
data = connection.get(key_name)
return cls.create_from_data(data)
@classmethod
def create_from_data(cls, data):
if data:
data = json.loads(data)
return cls(data)
return None
@classmethod
def all(cls, list_name, offset=0, limit=-1):
if limit != -1:
limit -= 1
if offset != 0:
offset -= 1
ids = redis_connection.zrevrange(list_name, offset, limit)
pipe = redis_connection.pipeline()
for id in ids:
pipe.get(id)
tasks = [cls.create_from_data(data) for data in pipe.execute()]
return tasks
@classmethod
def prune(cls, list_name, keep_count):
count = redis_connection.zcard(list_name)
if count <= keep_count:
return 0
remove_count = count - keep_count
keys = redis_connection.zrange(list_name, 0, remove_count - 1)
redis_connection.delete(*keys)
redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)
return remove_count
def __getattr__(self, item):
return self.data[item]
def __contains__(self, item):
return item in self.data
class QueryTask(object):
# TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
STATUSES = {
'PENDING': 1,
'STARTED': 2,
'SUCCESS': 3,
'FAILURE': 4,
'REVOKED': 4
}
def __init__(self, job_id=None, async_result=None):
if async_result:
self._async_result = async_result
else:
self._async_result = AsyncResult(job_id, app=celery)
@property
def id(self):
return self._async_result.id
def to_dict(self):
task_info = self._async_result._get_task_meta()
result, task_status = task_info['result'], task_info['status']
if task_status == 'STARTED':
updated_at = result.get('start_time', 0)
else:
updated_at = 0
status = self.STATUSES[task_status]
if isinstance(result, Exception):
error = result.message
status = 4
elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
if task_status == 'SUCCESS' and not error:
query_result_id = result
else:
query_result_id = None
return {
'id': self._async_result.id,
'updated_at': updated_at,
'status': status,
'error': error,
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
job = None
while try_count < 5:
try_count += 1
pipe = redis_connection.pipeline()
try:
pipe.watch(_job_lock_id(query_hash, data_source.id))
job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
if job_id:
logging.info("[%s] Found existing job: %s", query_hash, job_id)
job = QueryTask(job_id=job_id)
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(_job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled_query:
queue_name = data_source.scheduled_queue_name
scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
scheduled_query_id = None
result = execute_query.apply_async(args=(
query, data_source.id, metadata, user_id,
scheduled_query_id),
queue=queue_name)
job = QueryTask(async_result=result)
tracker = QueryTaskTracker.create(
result.id, 'created', query_hash, data_source.id,
scheduled_query is not None, metadata)
tracker.save(connection=pipe)
logging.info("[%s] Created new job: %s", query_hash, job.id)
pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
except redis.WatchError:
continue
if not job:
logging.error("[Manager][%s] Failed adding job for query.", query_hash)
return job
@celery.task(name="redash.tasks.refresh_queries_http")
def refresh_queries_http():
logger.info("Refreshing queries...")
jobs = []
for query in models.Query.every_queries():
logger.info("Updating Query {} ...".format(query.id))
if query.data_source.paused:
logger.info("Skipping refresh of Query 1 {} because datasource {} is paused because {}"
.format(query.id,
query.data_source.name,
query.data_source.pause_reason
))
else:
jobs.append(enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}))
""" LINK BETWEEN TRACKER AND ACTUAL TASK
for job in jobs:
print("JOBS : {}".format(job.to_dict().get('id', None)))
lists = QueryTaskTracker.ALL_LISTS
for _list in lists:
for tracker in QueryTaskTracker.all(_list):
print("TRACKER : {}".format(tracker.data.get('task_id', None)))
"""
return jobs
@celery.task(name="redash.tasks.refresh_queries")
def refresh_queries():
outdated_queries_count = 0
query_ids = []
with statsd_client.timer('manager.outdated_queries_lookup'):
for query in models.Query.outdated_queries():
if settings.FEATURE_DISABLE_REFRESH_QUERIES:
logging.info("Disabled refresh queries.")
elif query.data_source.paused:
logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
else:
enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
outdated_queries_count += 1
statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
logger.info("Done refreshing queries. Found %d outdated queries: %s" % (outdated_queries_count, query_ids))
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset('redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)
})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(name="redash.tasks.cleanup_tasks")
def cleanup_tasks():
in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
for tracker in in_progress:
result = AsyncResult(tracker.task_id)
# If the AsyncResult status is PENDING it means there is no celery task object for this tracker, and we can
# mark it as "dead":
if result.status == 'PENDING':
logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
tracker.query_hash, tracker.task_id)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='cancelled')
if result.ready():
logging.info("in progress tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
for tracker in waiting:
result = AsyncResult(tracker.task_id)
if result.ready():
logging.info("waiting tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
# Maintain constant size of the finished tasks list:
QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
@celery.task(name="redash.tasks.cleanup_query_results")
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than
settings.QUERY_RESULTS_MAX_AGE (a week by default, so it's less likely to be open in someone's browser and be used).
Each time the job deletes only settings.QUERY_RESULTS_CLEANUP_COUNT (100 by default) query results so it won't choke
the database in case of many such results.
"""
logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)
unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
@celery.task(name="redash.tasks.refresh_schemas")
def refresh_schemas():
"""
Refreshes the data sources schemas.
"""
blacklist = [int(ds_id) for ds_id in redis_connection.smembers('data_sources:schema:blacklist') if ds_id]
global_start_time = time.time()
logger.info(u"task=refresh_schemas state=start")
for ds in models.DataSource.query:
if ds.paused:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=paused(%s)", ds.id, ds.pause_reason)
elif ds.id in blacklist:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=blacklist", ds.id)
else:
logger.info(u"task=refresh_schema state=start ds_id=%s", ds.id)
start_time = time.time()
try:
ds.get_schema(refresh=True)
logger.info(u"task=refresh_schema state=finished ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
except Exception:
logger.exception(u"Failed refreshing schema for the data source: %s", ds.name)
logger.info(u"task=refresh_schema state=failed ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
def signal_handler(*args):
raise InterruptException
class QueryExecutionError(Exception):
pass
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues as the task class is created once per process, so we decided to have a plain object instead.
class QueryExecutor(object):
def __init__(self, task, query, data_source_id, user_id, metadata,
scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
self.metadata = metadata
self.data_source = self._load_data_source()
if user_id is not None:
self.user = models.User.query.get(user_id)
else:
self.user = None
self.query_hash = gen_query_hash(self.query)
self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
'created',
self.query_hash,
self.data_source_id,
False, metadata)
def run(self):
signal.signal(signal.SIGINT, signal_handler)
self.tracker.update(started_at=time.time(), state='started')
logger.debug("Executing query:\n%s", self.query)
self._log_progress('executing_query')
query_runner = self.data_source.query_runner
annotated_query = self._annotate_query(query_runner)
try:
data, error = query_runner.run_query(annotated_query, self.user)
except Exception as e:
error = unicode(e)
data = None
logging.warning('Unexpected error while running query:', exc_info=1)
run_time = time.time() - self.tracker.started_at
self.tracker.update(error=error, run_time=run_time, state='saving_results')
logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]", self.query_hash, data and len(data), error)
_unlock(self.query_hash, self.data_source.id)
if error:
self.tracker.update(state='failed')
result = QueryExecutionError(error)
if self.scheduled_query:
self.scheduled_query.schedule_failures += 1
models.db.session.add(self.scheduled_query)
else:
if (self.scheduled_query and
self.scheduled_query.schedule_failures > 0):
self.scheduled_query.schedule_failures = 0
models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org, self.data_source,
self.query_hash, self.query, data,
run_time, utils.utcnow())
self._log_progress('checking_alerts')
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
self._log_progress('finished')
result = query_result.id
models.db.session.commit()
return result
def _annotate_query(self, query_runner):
if query_runner.annotate_query():
self.metadata['Task ID'] = self.task.request.id
self.metadata['Query Hash'] = self.query_hash
self.metadata['Queue'] = self.task.request.delivery_info['routing_key']
annotation = u", ".join([u"{}: {}".format(k, v) for k, v in self.metadata.iteritems()])
annotated_query = u"/* {} */ {}".format(annotation, self.query)
else:
annotated_query = self.query
return annotated_query
def _log_progress(self, state):
logger.info(
u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
"task_id=%s queue=%s query_id=%s username=%s",
state, self.query_hash, self.data_source.type, self.data_source.id,
self.task.request.id,
self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'),
self.metadata.get('Username', 'unknown'))
self.tracker.update(state=state)
def _load_data_source(self):
logger.info("task=execute_query state=load_ds ds_id=%d", self.data_source_id)
return models.DataSource.query.get(self.data_source_id)
# user_id is added last as a keyword argument for backward compatibility -- to support executing previously submitted
# jobs before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None,
scheduled_query_id=None):
if scheduled_query_id is not None:
scheduled_query = models.Query.query.get(scheduled_query_id)
else:
scheduled_query = None
return QueryExecutor(self, query, data_source_id, user_id, metadata,
scheduled_query).run()
#Update tokens for API data access
@celery.task(name="redash.tasks.refresh_query_tokens")
def refresh_query_tokens():
logger.warning("Refreshing User Tokens")
#Refresh Tokens
models.User.refresh_tokens()
#Send Emails
users = models.User.get_all()
for u in users:
user = u.to_dict()
send_api_token(user)
|
bsd-2-clause
|
dpryan79/tools-iuc
|
tools/repmatch_gff3/repmatch_gff3.py
|
25
|
3106
|
# repmatch.py
#
# Replicate matching - matches paired peaks from two or more replicates
#
# Input: one or more gff files (matched_peak output from cwpair2), each a list of paired peaks from a replicate
#
# Output: list of matched groups and list of unmatched peaks
# Files: statistics_table.tabular (file to replicate ID), matched_paired_peaks.tabular, detail.tabular, unmatched_peaks.tabular
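# Example invocation (illustrative file names; each --input takes a dataset
# path followed by an identifier for that dataset):
#
#   python repmatch_gff3.py --input rep1_matched_peaks.gff 1 \
#       --input rep2_matched_peaks.gff 2 --method closest --distance 50 \
#       --replicates 2 --output_matched_peaks matched_peaks.gff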
import argparse
import repmatch_gff3_util
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='inputs', action='append', nargs=2, help="Input datasets")
parser.add_argument('--method', dest='method', default='closest', help='Method of finding match')
parser.add_argument('--distance', dest='distance', type=int, default=50, help='Maximum distance between peaks in different replicates to allow merging')
parser.add_argument('--step', dest='step', type=int, default=0, help='Step size of distance for each iteration')
parser.add_argument('--replicates', dest='replicates', type=int, default=2, help='Minimum number of replicates that must be matched for merging to occur')
parser.add_argument('--low_limit', dest='low_limit', type=int, default=-1000, help='Lower limit for c-w distance filter')
parser.add_argument('--up_limit', dest='up_limit', type=int, default=1000, help='Upper limit for c-w distance filter')
parser.add_argument('--output_files', dest='output_files', default='all', help='Restrict output dataset collections.')
parser.add_argument('--output_matched_peaks', dest='output_matched_peaks', help='Matched groups in gff format')
parser.add_argument('--output_unmatched_peaks', dest='output_unmatched_peaks', default=None, help='Unmatched paired peaks in tabular format')
parser.add_argument('--output_detail', dest='output_detail', default=None, help='Details in tabular format')
parser.add_argument('--output_statistics_table', dest='output_statistics_table', default=None, help='Keys in tabular format')
parser.add_argument('--output_statistics_histogram', dest='output_statistics_histogram', default=None, help='Histogram')
args = parser.parse_args()
dataset_paths = []
hids = []
for (dataset_path, hid) in args.inputs:
dataset_paths.append(dataset_path)
hids.append(hid)
repmatch_gff3_util.process_files(dataset_paths,
hids,
args.method,
args.distance,
args.step,
args.replicates,
args.up_limit,
args.low_limit,
args.output_files,
args.output_matched_peaks,
args.output_unmatched_peaks,
args.output_detail,
args.output_statistics_table,
args.output_statistics_histogram)
|
mit
|
kenshay/ImageScripter
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/win_platform_backend.py
|
5
|
15541
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.util import atexit_with_log
import collections
import contextlib
import ctypes
import logging
import os
import platform
import re
import socket
import struct
import subprocess
import sys
import time
import zipfile
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry.core import exceptions
from telemetry.core import os_version as os_version_module
from telemetry import decorators
from telemetry.internal.platform import desktop_platform_backend
from telemetry.internal.platform.power_monitor import msr_power_monitor
from telemetry.internal.util import path
try:
import pywintypes # pylint: disable=import-error
import win32api # pylint: disable=import-error
from win32com.shell import shell # pylint: disable=no-name-in-module
from win32com.shell import shellcon # pylint: disable=no-name-in-module
import win32con # pylint: disable=import-error
import win32file # pylint: disable=import-error
import win32gui # pylint: disable=import-error
import win32pipe # pylint: disable=import-error
import win32process # pylint: disable=import-error
try:
import winreg # pylint: disable=import-error
except ImportError:
import _winreg as winreg # pylint: disable=import-error
import win32security # pylint: disable=import-error
except ImportError:
pywintypes = None
shell = None
shellcon = None
win32api = None
win32con = None
win32file = None
win32gui = None
win32pipe = None
win32process = None
win32security = None
winreg = None
def _InstallWinRing0():
"""WinRing0 is used for reading MSRs."""
executable_dir = os.path.dirname(sys.executable)
python_is_64_bit = sys.maxsize > 2 ** 32
dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
dll_path = os.path.join(executable_dir, dll_file_name)
os_is_64_bit = platform.machine().endswith('64')
driver_file_name = 'WinRing0x64.sys' if os_is_64_bit else 'WinRing0.sys'
driver_path = os.path.join(executable_dir, driver_file_name)
# Check for WinRing0 and download if needed.
if not (os.path.exists(dll_path) and os.path.exists(driver_path)):
win_binary_dir = os.path.join(
path.GetTelemetryDir(), 'bin', 'win', 'AMD64')
zip_path = os.path.join(win_binary_dir, 'winring0.zip')
cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
try:
with zipfile.ZipFile(zip_path, 'r') as zip_file:
error_message = (
'Failed to extract %s into %s. If python claims that '
'the zip file is locked, this may be a lie. The problem may be '
'that python does not have write permissions to the destination '
'directory.'
)
# Install DLL.
if not os.path.exists(dll_path):
try:
zip_file.extract(dll_file_name, executable_dir)
except:
logging.error(error_message % (dll_file_name, executable_dir))
raise
# Install kernel driver.
if not os.path.exists(driver_path):
try:
zip_file.extract(driver_file_name, executable_dir)
except:
logging.error(error_message % (driver_file_name, executable_dir))
raise
finally:
os.remove(zip_path)
def TerminateProcess(process_handle):
if not process_handle:
return
if win32process.GetExitCodeProcess(process_handle) == win32con.STILL_ACTIVE:
win32process.TerminateProcess(process_handle, 0)
process_handle.close()
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
def __init__(self):
super(WinPlatformBackend, self).__init__()
self._msr_server_handle = None
self._msr_server_port = None
self._power_monitor = msr_power_monitor.MsrPowerMonitorWin(self)
@classmethod
def IsPlatformBackendForHost(cls):
return sys.platform == 'win32'
def __del__(self):
self.close()
def close(self):
self.CloseMsrServer()
def CloseMsrServer(self):
if not self._msr_server_handle:
return
TerminateProcess(self._msr_server_handle)
self._msr_server_handle = None
self._msr_server_port = None
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
performance_info = self._GetPerformanceInfo()
return performance_info.CommitTotal * performance_info.PageSize / 1024
@decorators.Cache
def GetSystemTotalPhysicalMemory(self):
performance_info = self._GetPerformanceInfo()
return performance_info.PhysicalTotal * performance_info.PageSize / 1024
def GetCpuStats(self, pid):
cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
# Convert 100 nanosecond units to seconds
cpu_time = (cpu_info['UserTime'] / 1e7 +
cpu_info['KernelTime'] / 1e7)
return {'CpuProcessTime': cpu_time}
def GetCpuTimestamp(self):
"""Return current timestamp in seconds."""
return {'TotalTime': time.time()}
@decorators.Deprecated(
2017, 11, 4,
'Clients should use tracing and memory-infra in new Telemetry '
'benchmarks. See for context: https://crbug.com/632021')
def GetMemoryStats(self, pid):
memory_info = self._GetWin32ProcessInfo(
win32process.GetProcessMemoryInfo, pid)
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def KillProcess(self, pid, kill_process_tree=False):
# os.kill for Windows is Python 2.7.
cmd = ['taskkill', '/F', '/PID', str(pid)]
if kill_process_tree:
cmd.append('/T')
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
def GetSystemProcessInfo(self):
# [3:] To skip 2 blank lines and header.
lines = subprocess.Popen(
['wmic', 'process', 'get',
'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
'/format:csv'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
process_info = []
for line in lines:
if not line:
continue
parts = line.split(',')
pi = {}
pi['ProcessId'] = int(parts[-1])
pi['ParentProcessId'] = int(parts[-2])
pi['Name'] = parts[-3]
creation_date = None
if parts[-4]:
creation_date = float(re.split('[+-]', parts[-4])[0])
pi['CreationDate'] = creation_date
pi['CommandLine'] = ','.join(parts[1:-4])
process_info.append(pi)
return process_info
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
ppid_map = collections.defaultdict(list)
creation_map = {}
for pi in self.GetSystemProcessInfo():
ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
if pi['CreationDate']:
creation_map[pi['ProcessId']] = pi['CreationDate']
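    # Windows recycles PIDs, so a process whose ParentProcessId equals |pid|
    # may actually predate it. The creation-time comparison below keeps only
    # processes created no earlier than their supposed parent, which filters
    # out such stale parent/child pairs.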
def _InnerGetChildPids(pid):
if not pid or pid not in ppid_map:
return []
ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
for child in ret:
if child == pid:
continue
ret.extend(_InnerGetChildPids(child))
return ret
return _InnerGetChildPids(pid)
def GetCommandLine(self, pid):
for pi in self.GetSystemProcessInfo():
if pid == pi['ProcessId']:
return pi['CommandLine']
raise exceptions.ProcessGoneException()
@decorators.Cache
def GetArchName(self):
return platform.machine()
def GetOSName(self):
return 'win'
@decorators.Cache
def GetOSVersionName(self):
os_version = platform.uname()[3]
if os_version.startswith('5.1.'):
return os_version_module.XP
if os_version.startswith('6.0.'):
return os_version_module.VISTA
if os_version.startswith('6.1.'):
return os_version_module.WIN7
# The version of python.exe we commonly use (2.7) is only manifested as
# being compatible with Windows versions up to 8. Therefore Windows *lies*
# to python about the version number to keep it runnable on Windows 10.
key_name = r'Software\Microsoft\Windows NT\CurrentVersion'
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
try:
value, _ = winreg.QueryValueEx(key, 'CurrentMajorVersionNumber')
except OSError:
value = None
finally:
key.Close()
if value == 10:
return os_version_module.WIN10
elif os_version.startswith('6.2.'):
return os_version_module.WIN8
elif os_version.startswith('6.3.'):
return os_version_module.WIN81
raise NotImplementedError(
'Unknown win version: %s, CurrentMajorVersionNumber: %s' %
(os_version, value))
def CanFlushIndividualFilesFromSystemCache(self):
return True
def _GetWin32ProcessInfo(self, func, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
handle = None
try:
handle = win32api.OpenProcess(mask, False, pid)
return func(handle)
except pywintypes.error, e:
errcode = e[0]
if errcode == 87:
raise exceptions.ProcessGoneException()
raise
finally:
if handle:
win32api.CloseHandle(handle)
def _GetPerformanceInfo(self):
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
# pylint: disable=bad-super-call
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info
def IsCurrentProcessElevated(self):
if self.GetOSVersionName() < os_version_module.VISTA:
# TOKEN_QUERY is not defined before Vista. All processes are elevated.
return True
handle = win32process.GetCurrentProcess()
with contextlib.closing(
win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
return bool(win32security.GetTokenInformation(
token, win32security.TokenElevation))
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
"""Launch an application. Returns a PyHANDLE object."""
parameters = ' '.join(parameters) if parameters else ''
if elevate_privilege and not self.IsCurrentProcessElevated():
# Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
# elevate privileges. A new console will be created if the new process has
# different permissions than this process.
proc_info = shell.ShellExecuteEx(
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
lpVerb='runas' if elevate_privilege else '',
lpFile=application,
lpParameters=parameters,
nShow=win32con.SW_HIDE)
if proc_info['hInstApp'] <= 32:
raise Exception('Unable to launch %s' % application)
return proc_info['hProcess']
else:
handle, _, _, _ = win32process.CreateProcess(
None, application + ' ' + parameters, None, None, False,
win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
return handle
def CanMonitorPower(self):
return self._power_monitor.CanMonitorPower()
def CanMeasurePerApplicationPower(self):
return self._power_monitor.CanMeasurePerApplicationPower()
def StartMonitoringPower(self, browser):
self._power_monitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
return self._power_monitor.StopMonitoringPower()
def _StartMsrServerIfNeeded(self):
if self._msr_server_handle:
return
_InstallWinRing0()
pipe_name = r"\\.\pipe\msr_server_pipe_{}".format(os.getpid())
# Try to open a named pipe to receive a msr port number from server process.
pipe = win32pipe.CreateNamedPipe(
pipe_name,
win32pipe.PIPE_ACCESS_INBOUND,
win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT,
1, 32, 32, 300, None)
parameters = (
os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
pipe_name,
)
self._msr_server_handle = self.LaunchApplication(
sys.executable, parameters, elevate_privilege=True)
if pipe != win32file.INVALID_HANDLE_VALUE:
if win32pipe.ConnectNamedPipe(pipe, None) == 0:
self._msr_server_port = int(win32file.ReadFile(pipe, 32)[1])
win32api.CloseHandle(pipe)
# Wait for server to start.
try:
socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
except socket.error:
self.CloseMsrServer()
atexit_with_log.Register(TerminateProcess, self._msr_server_handle)
def ReadMsr(self, msr_number, start=0, length=64):
self._StartMsrServerIfNeeded()
if not self._msr_server_handle:
raise OSError('Unable to start MSR server.')
sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 5)
try:
sock.sendall(struct.pack('I', msr_number))
response = sock.recv(8)
finally:
sock.close()
return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
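  # ReadMsr() extracts bits [start, start + length) of the 64-bit register:
  # the unpacked value is shifted right by |start| and then masked to |length|
  # bits (">>" binds tighter than "&" in Python). For example, start=8 and
  # length=8 yield bits 8..15 of the MSR.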
def IsCooperativeShutdownSupported(self):
return True
def CooperativelyShutdown(self, proc, app_name):
pid = proc.pid
# http://timgolden.me.uk/python/win32_how_do_i/
# find-the-window-for-my-subprocess.html
#
# It seems that intermittently this code manages to find windows
# that don't belong to Chrome -- for example, the cmd.exe window
# running slave.bat on the tryservers. Try to be careful about
# finding only Chrome's windows. This works for both the browser
# and content_shell.
#
# It seems safest to send the WM_CLOSE messages after discovering
# all of the sub-process's windows.
def find_chrome_windows(hwnd, hwnds):
_, win_pid = win32process.GetWindowThreadProcessId(hwnd)
if (pid == win_pid and
win32gui.IsWindowVisible(hwnd) and
win32gui.IsWindowEnabled(hwnd) and
win32gui.GetClassName(hwnd).lower().startswith(app_name)):
hwnds.append(hwnd)
return True
hwnds = []
win32gui.EnumWindows(find_chrome_windows, hwnds)
if hwnds:
for hwnd in hwnds:
win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)
return True
else:
logging.info('Did not find any windows owned by target process')
return False
|
gpl-3.0
|
mattuuh7/incubator-airflow
|
airflow/contrib/hooks/datastore_hook.py
|
44
|
4342
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class DatastoreHook(GoogleCloudBaseHook):
"""
Interact with Google Cloud Datastore. This hook uses the Google Cloud Platform
connection.
    This object is not thread safe. If you want to make multiple requests
    simultaneously, you will need to create a hook per thread.
"""
def __init__(self,
datastore_conn_id='google_cloud_datastore_default',
delegate_to=None):
super(DatastoreHook, self).__init__(datastore_conn_id, delegate_to)
# datasetId is the same as the project name
self.dataset_id = self._get_field('project')
self.connection = self.get_conn()
def get_conn(self):
"""
        Returns a Google Cloud Datastore service object.
"""
http_authorized = self._authorize()
return build('datastore', 'v1beta2', http=http_authorized)
def allocate_ids(self, partialKeys):
"""
Allocate IDs for incomplete keys.
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/allocateIds
:param partialKeys: a list of partial keys
:return: a list of full keys.
"""
resp = self.connection.datasets().allocateIds(datasetId=self.dataset_id, body={'keys': partialKeys}).execute()
return resp['keys']
def begin_transaction(self):
"""
Get a new transaction handle
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/beginTransaction
:return: a transaction handle
"""
resp = self.connection.datasets().beginTransaction(datasetId=self.dataset_id, body={}).execute()
return resp['transaction']
def commit(self, body):
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/commit
:param body: the body of the commit request
:return: the response body of the commit request
"""
resp = self.connection.datasets().commit(datasetId=self.dataset_id, body=body).execute()
return resp
def lookup(self, keys, read_consistency=None, transaction=None):
"""
Lookup some entities by key
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/lookup
:param keys: the keys to lookup
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:param transaction: the transaction to use, if any.
:return: the response body of the lookup request.
"""
body = {'keys': keys}
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
return self.connection.datasets().lookup(datasetId=self.dataset_id, body=body).execute()
def rollback(self, transaction):
"""
Roll back a transaction
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/rollback
:param transaction: the transaction to roll back
"""
self.connection.datasets().rollback(datasetId=self.dataset_id, body={'transaction': transaction})\
.execute()
def run_query(self, body):
"""
Run a query for entities.
see https://cloud.google.com/datastore/docs/apis/v1beta2/datasets/runQuery
:param body: the body of the query request
:return: the batch of query results.
"""
resp = self.connection.datasets().runQuery(datasetId=self.dataset_id, body=body).execute()
return resp['batch']
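# Illustrative usage sketch (not part of the original module). The connection id,
# kind and property names are made-up examples; the request bodies follow the
# v1beta2 Datastore API that the hook above wraps.
def _example_datastore_usage():
    hook = DatastoreHook(datastore_conn_id='google_cloud_datastore_default')
    transaction = hook.begin_transaction()
    hook.commit({
        'transaction': transaction,
        'mode': 'TRANSACTIONAL',
        'mutation': {
            'insertAutoId': [{
                'key': {'path': [{'kind': 'ExampleKind'}]},
                'properties': {'note': {'stringValue': 'hello'}},
            }],
        },
    })
    return hook.run_query({'query': {'kinds': [{'name': 'ExampleKind'}]}})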
|
apache-2.0
|
mkrzewic/AliPhysics
|
PWGJE/EMCALJetTasks/Tracks/analysis/write/RawSpectrumWriter.py
|
41
|
3421
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.FileHandler import LegoTrainFileReader
from PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectrumFitter import MinBiasFitter
from ROOT import TList, TFile, TObject
class RawSpectrumWriter(object):
def __init__(self):
self.__categories = {"Full":{}, "EMCAL":{}}
def AddTriggerToCategory(self, category, trigger, spectrum):
self.__categories[category][trigger] = spectrum
def Process(self, filename):
reader = LegoTrainFileReader(filename)
results = reader.ReadFile()
categories = {"Full":"tracksAll", "EMCAL":"tracksWithClusters"}
for trigger in ["MinBias", "EMCJHigh", "EMCJLow", "EMCGHigh", "EMCGLow"]:
data = results.GetData(trigger)
for stype,cont in categories.iteritems():
spectrum = self.MakeNormalisedSpectrum(data.FindTrackContainer(cont), trigger, stype)
self.AddTriggerToCategory(stype, trigger, spectrum)
if trigger == "MinBias":
self.AddTriggerToCategory(stype, "MinBiasFit", self.FitMinBias(spectrum, stype))
def FitMinBias(self, spectrum, category):
fitter = MinBiasFitter("mbfitter", spectrum)
param = fitter.MakeBinnedParameterisationDefault(True)
param.SetName("FitMinBias%s" %(category))
return param
def MakeNormalisedSpectrum(self, spectrum, trigger, category):
spectrum.SetVertexRange(-10., 10.)
spectrum.SetPileupRejection(True)
spectrum.SelectTrackCuts(1)
return spectrum.MakeProjection(0, "RawSpectrum%s%s" %(trigger, category))
def WriteToFile(self, outputname):
outputlists = []
for categ in self.__categories.keys():
mylist = TList()
mylist.SetName(categ)
for entry in self.__categories[categ].itervalues():
mylist.Add(entry)
outputlists.append(mylist)
outputfile = TFile(outputname, "RECREATE")
outputfile.cd()
for myobject in outputlists:
myobject.Write(myobject.GetName(), TObject.kSingleKey)
outputfile.Close()
def Create(filename):
writer = RawSpectrumWriter()
writer.Process(filename)
writer.WriteToFile("normspectra.root")
|
bsd-3-clause
|
daliwangi/bitcoin
|
test/functional/combine_logs.py
|
69
|
4611
|
#!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
mit
|
kashif/chainer
|
tests/chainer_tests/initializer_tests/test_init.py
|
8
|
1230
|
import unittest
from chainer import initializers
from chainer import testing
import numpy
class TestGetInitializer(unittest.TestCase):
def test_scalar(self):
init = initializers._get_initializer(10)
self.assertIsInstance(init, initializers.Constant)
x = numpy.empty((2, 3), dtype=numpy.int32)
init(x)
expected = numpy.full((2, 3), 10, dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
def test_numpy_array(self):
c = numpy.array([1, 2, 3])
init = initializers._get_initializer(c)
self.assertIsInstance(init, initializers.Constant)
x = numpy.empty((3,), dtype=numpy.int32)
init(x)
expected = numpy.array([1, 2, 3], dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
def test_callable(self):
def initializer(arr):
arr[...] = 100
init = initializers._get_initializer(initializer)
self.assertTrue(callable(init))
x = numpy.empty((2, 3), dtype=numpy.int32)
init(x)
expected = numpy.full((2, 3), 100, dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
testing.run_module(__name__, __file__)
|
mit
|
mancoast/CPythonPyc_test
|
fail/323_profilee.py
|
398
|
3041
|
"""
Input for test_profile.py and test_cprofile.py.
IMPORTANT: This stuff is touchy. If you modify anything above the
test class you'll have to regenerate the stats by running the two
test files.
*ALL* NUMBERS in the expected output are relevant. If you change
the formatting of pstats, please don't just regenerate the expected
output without checking very carefully that not a single number has
changed.
"""
import sys
# In order to have reproducible time, we simulate a timer in the global
# variable 'TICKS', which represents simulated time in milliseconds.
# (We can't use a helper function to increment the timer since it would be
# included in the profile and would appear to consume all the time.)
TICKS = 42000
def timer():
return TICKS
def testfunc():
# 1 call
# 1000 ticks total: 270 ticks local, 730 ticks in subfunctions
global TICKS
TICKS += 99
helper() # 300
helper() # 300
TICKS += 171
factorial(14) # 130
def factorial(n):
# 23 calls total
# 170 ticks total, 150 ticks local
# 3 primitive calls, 130, 20 and 20 ticks total
# including 116, 17, 17 ticks local
global TICKS
if n > 0:
TICKS += n
return mul(n, factorial(n-1))
else:
TICKS += 11
return 1
def mul(a, b):
# 20 calls
# 1 tick, local
global TICKS
TICKS += 1
return a * b
def helper():
# 2 calls
# 300 ticks total: 20 ticks local, 260 ticks in subfunctions
global TICKS
TICKS += 1
helper1() # 30
TICKS += 2
helper1() # 30
TICKS += 6
helper2() # 50
TICKS += 3
helper2() # 50
TICKS += 2
helper2() # 50
TICKS += 5
helper2_indirect() # 70
TICKS += 1
def helper1():
# 4 calls
# 30 ticks total: 29 ticks local, 1 tick in subfunctions
global TICKS
TICKS += 10
hasattr(C(), "foo") # 1
TICKS += 19
lst = []
lst.append(42) # 0
sys.exc_info() # 0
def helper2_indirect():
helper2() # 50
factorial(3) # 20
def helper2():
# 8 calls
    # 50 ticks total: 39 ticks local, 11 ticks in subfunctions
global TICKS
TICKS += 11
hasattr(C(), "bar") # 1
TICKS += 13
subhelper() # 10
TICKS += 15
def subhelper():
# 8 calls
# 10 ticks total: 8 ticks local, 2 ticks in subfunctions
global TICKS
TICKS += 2
for i in range(2): # 0
try:
C().foo # 1 x 2
except AttributeError:
TICKS += 3 # 3 x 2
class C:
def __getattr__(self, name):
# 28 calls
# 1 tick, local
global TICKS
TICKS += 1
raise AttributeError
|
gpl-3.0
|
dios-game/dios-cocos
|
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/plugin_jscompile/__init__.py
|
1
|
12716
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import platform
import cocos
from MultiLanguage import MultiLanguage
class CCPluginJSCompile(cocos.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def plugin_name():
return "jscompile"
@staticmethod
def brief_description():
# returns a short description of this module
return MultiLanguage.get_string('JSCOMPILE_BRIEF')
# This is not the constructor, just an initializator
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._verbose = options.verbose
self._config = None
self._workingdir = workingdir
self._closure_params = ''
if options.compiler_config != None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self._pre_order = self._config["pre_order"]
self.normalize_path_in_list(self._pre_order)
self._post_order = self._config["post_order"]
self.normalize_path_in_list(self._post_order)
self._skip = self._config["skip"]
self.normalize_path_in_list(self._skip)
self._closure_params = self._config["closure_params"]
if options.closure_params is not None:
self._closure_params = options.closure_params
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
def normalize_path_in_list(self, list):
for i in list:
tmp = os.path.normpath(i)
list[list.index(i)] = tmp
return list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir)
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
if os.path.exists(dst_rootpath) == False:
# There was an error on creation, so make sure we know about it
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', dst_rootpath),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
cocos.Logging.debug(MultiLanguage.get_string('JSCOMPILE_DEBUG_COMPILE_FILE_FMT', jsfile))
jsbcc_exe_path = ""
if(cocos.os_is_linux()):
if(platform.architecture()[0] == "32bit"):
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x86")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x64")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
cmd_str = "\"%s\" \"%s\" \"%s\"" % (jsbcc_exe_path, jsfile, output_file)
self._run_cmd(cmd_str)
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar \"%s\" %s --js %s --js_output_file \"%s\"" % (compiler_jar_path, self._closure_params, jsfiles, self._compressed_js_path)
self._run_cmd(command)
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._pre_order, 1)
def js_filename_post_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._post_order, -1)
def _js_filename_compare(self, a, b, files, delta):
index_a = self.index_in_list(a, files)
index_b = self.index_in_list(b, files)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1 * delta
elif not is_a_in_list and is_b_in_list:
return 1 * delta
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def reorder_js_files(self):
if self._config == None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._skip:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
if self._use_closure_compiler == True:
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPRESS_TIP'))
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPILE_TO_BYTECODE'))
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos.py script
def run(self, argv, dependencies):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
if os.path.exists(self._dst_dir) == False:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', self._dst_dir),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# download the bin folder
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
if not os.path.exists(jsbcc_exe_path):
download_cmd_path = os.path.join(self._workingdir, os.pardir, os.pardir)
subprocess.call("python %s -f -r no" % (os.path.join(download_cmd_path, "download-bin.py")), shell=True, cwd=download_cmd_path)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
cocos.Logging.info(MultiLanguage.get_string('LUACOMPILE_INFO_FINISHED'))
def parse_args(self, argv):
"""
"""
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
help=MultiLanguage.get_string('LUACOMPILE_ARG_VERBOSE'))
parser.add_argument("-s", "--src",
action="append", dest="src_dir_arr",
help=MultiLanguage.get_string('JSCOMPILE_ARG_SRC'))
parser.add_argument("-d", "--dst",
action="store", dest="dst_dir",
help=MultiLanguage.get_string('JSCOMPILE_ARG_DST'))
parser.add_argument("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help=MultiLanguage.get_string('JSCOMPILE_ARG_CLOSURE'))
parser.add_argument("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help=MultiLanguage.get_string('JSCOMPILE_ARG_OUT_FILE_NAME'))
parser.add_argument("-j", "--compiler_config",
action="store", dest="compiler_config",
help=MultiLanguage.get_string('JSCOMPILE_ARG_JSON_FILE'))
parser.add_argument("-m", "--closure_params",
action="store", dest="closure_params",
help=MultiLanguage.get_string('JSCOMPILE_ARG_EXTRA_PARAM'))
options = parser.parse_args(argv)
if options.src_dir_arr == None:
raise cocos.CCPluginError(MultiLanguage.get_string('JSCOMPILE_ERROR_SRC_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
elif options.dst_dir == None:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DST_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
else:
for src_dir in options.src_dir_arr:
if os.path.exists(src_dir) == False:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DIR_NOT_EXISTED_FMT',
(src_dir)),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# script directory
if getattr(sys, 'frozen', None):
workingdir = os.path.realpath(os.path.dirname(sys.executable))
else:
workingdir = os.path.realpath(os.path.dirname(__file__))
self.init(options, workingdir)
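# Illustrative invocation (paths are made-up examples): the plugin is normally
# driven through the cocos command line, roughly like
#
#     cocos jscompile -s src/game -s frameworks/js-bindings -d build/jsc \
#         -c -j compiler_config.json
#
# where -s/-d select the source and output directories, -c enables the closure
# compiler pass and -j points at the optional JSON compiler config.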
|
mit
|
FlaPer87/django-nonrel
|
django/core/files/temp.py
|
536
|
1819
|
"""
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.TemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that works in Windows and supports
reopening of the temporary file in windows.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
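# Illustrative sketch of what the Windows subclass above makes possible:
# re-opening the temporary file by name while it is still open, e.g.
#
#     tmp = NamedTemporaryFile(suffix='.txt')
#     tmp.write(b'hello')
#     tmp.flush()
#     reopened = open(tmp.name, 'rb')   # would fail with O_TEMPORARY on Windows
#     data = reopened.read()
#     reopened.close()
#     tmp.close()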
|
bsd-3-clause
|
wnt-zhp/hufce
|
django/utils/timezone.py
|
81
|
8011
|
"""Timezone helper functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
# If you change settings.TIME_ZONE in tests, reset _localtime to None.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
# Templates
def localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
if (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True)):
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
# Utilities
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
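# Illustrative sketch of typical use of the helpers above (assumes Django
# settings are configured and USE_TZ is enabled):
#
#     naive = datetime(2012, 3, 3, 10, 30)
#     aware = make_aware(naive, get_default_timezone())
#     with override(utc):
#         assert get_current_timezone() is utc
#         local_value = localtime(aware)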
|
gpl-3.0
|
berrange/nova
|
nova/tests/api/openstack/compute/extensions/foxinsocks.py
|
103
|
2948
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
class FoxInSocksServerControllerExtension(wsgi.Controller):
@wsgi.action('add_tweedle')
def _add_tweedle(self, req, id, body):
return "Tweedle Beetle Added."
@wsgi.action('delete_tweedle')
def _delete_tweedle(self, req, id, body):
return "Tweedle Beetle Deleted."
@wsgi.action('fail')
def _fail(self, req, id, body):
raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['big_bands'] = 'Pig Bands!'
class Foxinsocks(extensions.ExtensionDescriptor):
"""The Fox In Socks Extension."""
name = "Fox In Socks"
alias = "FOXNSOX"
namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
updated = "2011-01-22T13:25:27-06:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources
def get_controller_extensions(self):
extension_list = []
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
(FoxInSocksFlavorBandsControllerExtension, 'flavors'),
]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
extension_list.append(ext)
return extension_list
|
apache-2.0
|
sujeetv/incubator-hawq
|
tools/bin/gppylib/operations/hdfs_cmd.py
|
9
|
8261
|
#!/usr/bin/env python
# encoding: utf-8
'''
This file contains all APIs for calling hdfs-related commands directly
'''
import logging
import os, sys, subprocess
import re
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
class HDFS_Cmd(object):
def __init__(self, hadoop_dir=None):
self.hadoop_dir = hadoop_dir;
def run_shell_cmd(self, cmd, isRaiseException=True, isReturnRC=False):
logger.debug('shell command is: %s', (cmd))
p = subprocess.Popen(cmd, shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = p.communicate()
if isReturnRC:
return p.returncode
if p.returncode != 0:
logger.debug(result[0])
if isRaiseException:
logger.error("Error at execute: %s %s %s" % (str(cmd), result[0], result[1]))
raise Exception(result[1])
return result[0]
def check_hdfs(self):
if self.hadoop_dir==None:
self.hadoop_dir = os.getenv('HADOOP_HOME')
if self.hadoop_dir==None:
try:
self.hadoop_dir = self.run_shell_cmd('which hadoop').rsplit('/', 2)[0]
except Exception, e:
logger.fatal('which hadoop failure: ' + str(e))
raise Exception("Can not find the path for hadoop command from env $HADOOP_HOME or $PATH")
if self.hadoop_dir!=None:
self.hdfs_path = os.path.join(self.hadoop_dir, 'bin')
if (not os.path.exists(self.hdfs_path )):
logger.error("Can not find the path: %s." % str(self.hdfs_path ))
raise Exception("Can not find the path: %s." % str(self.hdfs_path ))
def parse_command_output(self,inputs, regexps):
'''
inputs:
        inputs: it can be a list with a nested list (like a table: row, column),
        or a string that includes all lines, which can be treated as a table with one column
        regexps: it can be a list with the format [[input_col_id, regexp, output_group_num], ...],
        or it can be a string, which is treated as [[0, regexps, 1]]
        Parse the inputs line by line with the regular expressions, and collect the matched groups as the columns of one output row
'''
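        # Worked example (illustrative): with regexps
        #   [[0, r'^(d)$', 1], [1, r'^(.*/gpseg\d+)$', 1]]
        # and inputs
        #   [['d', '/hawq1/gpseg0'], ['-', '/hawq1/afile']]
        # only the first row matches both expressions, so the result is
        #   [['d', '/hawq1/gpseg0']].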
result=[]
line_id = 0
p=[]
# if it is a string
if not hasattr(regexps, '__iter__') or isinstance(regexps, basestring):
regexps = [[0, regexps, 1]]
for rei in regexps:
if(len(rei)!=3):
logger.error("Format error for : %s" % (rei))
raise Exception("Format error for : %s" % (rei))
p.append(re.compile(rei[1]))
        # if inputs is a non-string sequence and is iterable, then it should be a list; each item stands for a line
if hasattr(inputs, '__iter__') and not isinstance(inputs, basestring):
content = inputs
else:
content = inputs.splitlines()
while(line_id<len(content)):
row = []
m = []
is_all_matched = True
j=-1
for rei in regexps:
j+=1
input_col_id = rei[0]
if input_col_id==-1 or content != inputs: #the whole item
line = content[line_id]
else:
line = content[line_id][input_col_id]
mi = p[j].match(line)
if mi:
m.append(mi)
else:
is_all_matched = False
break
if is_all_matched:
i=-1
for rei in regexps:
i+=1
group_num = rei[2]
for gid in range(1, group_num+1):
row.append(m[i].group(gid));
result.append(row)
line_id+=1;
return result
def run_hdfs_command(self,cmd, isDfs=True, isRaiseException=True, isReturnRC=False):
_cmd = os.path.join(self.hdfs_path, 'hdfs')
if (not os.path.exists(_cmd)):
logger.error("Can not find the path: %s." % str(_cmd))
raise Exception("Can not find the path: %s." % str(_cmd))
if isDfs:
_cmd += ' dfs -'
else:
_cmd +=' '
_cmd += '%s' % (cmd)
return self.run_shell_cmd(_cmd,isRaiseException,isReturnRC)
def run_hdfs_ls(self, path, isRecurs=False):
"""
Return [['d', 'dir_path1'], ['-', 'file_path1'], ...]
"""
cmd = 'ls '
if isRecurs:
cmd += '-R '
cmd += path
output = self.run_hdfs_command(cmd)
result = self.parse_command_output(output,[[0,'^([d|-])[r|w|x|-]{9,9}\s+[-|\d]\s+\w+\s+\w+\s+\d+\s+[\d|-]+\s+[\d|:]+\s+(.*)', 2]])
return result
def run_hdfs_mkdir(self, path, isRecurs=False):
cmd = 'mkdir '
if isRecurs:
cmd += '-p '
cmd += path
self.run_hdfs_command(cmd)
def run_hdfs_rm(self, path, isRecurs=False):
cmd = 'rm '
if isRecurs:
cmd += '-R '
cmd += path
self.run_hdfs_command(cmd)
def run_hdfs_mv(self, path, new_path):
cmd = 'mv %s %s'% (path, new_path)
self.run_hdfs_command(cmd)
def run_hdfs_test(self, path, flag):
cmd = 'test %s %s' % (flag, path)
return self.run_hdfs_command(cmd, isReturnRC=True)
def run_hdfs_leave_safemode(self):
# force to leave safe mode
cmd = 'dfsadmin -safemode leave'
self.run_hdfs_command(cmd, isDfs=False, isRaiseException=True)
def run_hdfs_create_snapshot(self,path, snapshotname):
# add permission
cmd = 'dfsadmin -allowSnapshot %s' % path;
self.run_hdfs_command(cmd, isDfs=False)
cmd = 'createSnapshot %s '% (path)
if snapshotname:
cmd += snapshotname
self.run_hdfs_command(cmd)
def run_hdfs_rename_snapshot(self, path, oldname, newname):
cmd = 'renameSnapshot %s %s %s'% (path, oldname, newname)
self.run_hdfs_command(cmd)
def run_hdfs_delete_snapshot(self, path, snapshotname):
cmd = 'deleteSnapshot %s %s'% (path, snapshotname)
self.run_hdfs_command(cmd)
def run_hdfs_restore_snapshot(self, path, snapshotname):
ss_path = os.path.join(path, '.snapshot', snapshotname)
result = self.run_hdfs_ls(ss_path, True)
for dir, path1 in result:
if dir!='d':
path2 = path1.replace("/.snapshot/%s"%snapshotname, "")
cmd = 'cp -f -p %s %s' % (path1, path2)
self.run_hdfs_command(cmd)
# successfully restored snapshot, we can delete the snapshot now
self.run_hdfs_delete_snapshot(path, snapshotname)
if __name__ == '__main__':
    # firstly test that the hdfs command exists
#gdb_path = spawn.find_executable("hdfs")
#if not gdb_path:
#logger.info("hadoop hdfs command doesn't exists, please make sure you have installed hadoop!")
#exit(1);
hdfs_cmd = HDFS_Cmd('/Users/gpadmin/workspace/install/hadoop-2.4.1-gphd-3.2.0.0')
hdfs_cmd.check_hdfs()
result = hdfs_cmd.run_hdfs_ls('/hawq1/gpseg0', True)
print "result: %s" % result
result1 = hdfs_cmd.run_hdfs_ls('/hawq1/', False)
print "result1: %s" % result1
result2 = hdfs_cmd.parse_command_output(result1, [[0, '^(d)$', 1], [1, '^(.*/gpseg\d+)$', 1]])
print "result2: %s" % result2
result3 = hdfs_cmd.parse_command_output(result1, [[0, '^d$', 0], [1, '^(.*/gpseg\d+)$', 1]])
print "result3: %s" % result3
result4 = hdfs_cmd.run_hdfs_test('hdfs://localhost:9000/hawq1/gpseg0/.snapshot/.gpmigrator_orig', '-e');
print "result4: %s" % result4
result4 = hdfs_cmd.run_hdfs_test('hdfs://localhost:9000/hawq1/gpseg0/.snapshot/.gpmigrator_orig1', '-e');
print "result4: %s" % result4
hdfs_cmd = HDFS_Cmd()
hdfs_cmd.check_hdfs()
result = hdfs_cmd.run_hdfs_ls('/hawq1/gpseg1', True)
print "result: %s" % result
|
apache-2.0
|
ravibhure/ansible
|
lib/ansible/modules/cloud/amazon/cloudfront_distribution.py
|
14
|
85861
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_distribution
short_description: create, update and delete aws cloudfront distributions.
description:
- Allows for easy creation, updating and deletion of CloudFront distributions.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author:
- Willem van Ketwich (@wilvk)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
state:
description:
- The desired state of the distribution
present - creates a new distribution or updates an existing distribution.
absent - deletes an existing distribution.
choices: ['present', 'absent']
default: 'present'
distribution_id:
description:
- The id of the cloudfront distribution. This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
e_tag:
description:
- A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
Is determined automatically if not specified.
caller_reference:
description:
- A unique identifier for creating and updating cloudfront distributions. Each caller reference must be unique across all distributions. e.g. a caller
reference used in a web distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
'YYYY-MM-DDTHH:MM:SS.ffffff'.
tags:
description:
- Should be input as a dict() of key-value pairs.
Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1'
purge_tags:
description:
- Specifies whether existing tags will be removed before adding new tags. When I(purge_tags=yes), existing tags are removed and I(tags) are added, if
specified. If no tags are specified, it removes all existing tags for the distribution. When I(purge_tags=no), existing tags are kept and I(tags)
are added, if specified.
default: 'no'
choices: ['yes', 'no']
alias:
description:
- The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only
be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as
the I(e_tag), or I(caller_reference) of an existing distribution.
aliases:
description:
- A I(list[]) of domain name aliases (CNAMEs) as strings to be used for the distribution. Each alias must be unique across all distribution for the AWS
account.
purge_aliases:
description:
- Specifies whether existing aliases will be removed before adding new aliases. When I(purge_aliases=yes), existing aliases are removed and I(aliases)
are added.
default: 'no'
choices: ['yes', 'no']
default_root_object:
description:
- A config element that specifies the path to request when the user requests the origin. e.g. if specified as 'index.html', this maps to
www.example.com/index.html when www.example.com is called by the user. This prevents the entire distribution origin from being exposed at the root.
default_origin_domain_name:
description:
- The domain name to use for an origin if no I(origins) have been specified. Should only be used on a first run of generating a distribution and not on
subsequent runs. Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
default_origin_path:
description:
- The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
origins:
description:
- A config element that is a I(list[]) of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
Each origin item comprises the attributes
I(id)
I(domain_name) (defaults to default_origin_domain_name if not specified)
I(origin_path) (defaults to default_origin_path if not specified)
I(custom_headers[])
I(header_name)
I(header_value)
I(s3_origin_access_identity_enabled)
I(custom_origin_config)
I(http_port)
I(https_port)
I(origin_protocol_policy)
I(origin_ssl_protocols[])
I(origin_read_timeout)
I(origin_keepalive_timeout)
purge_origins:
description: Whether to remove any origins that aren't listed in I(origins)
default: false
default_cache_behavior:
description:
- A config element that is a complex object specifying the default cache behavior of the distribution. If not specified, the I(target_origin_id) is
defined as the I(target_origin_id) of the first valid I(cache_behavior) in I(cache_behaviors) with defaults.
The default cache behavior comprises the attributes
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
I(lambda_function_arn)
I(event_type)
cache_behaviors:
description:
- A config element that is a I(list[]) of complex cache behavior objects to be specified for the distribution. The order
of the list is preserved across runs unless C(purge_cache_behavior) is enabled.
Each cache behavior comprises the attributes
I(path_pattern)
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
purge_cache_behaviors:
description: Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). This switch
also allows the reordering of cache_behaviors.
default: false
custom_error_responses:
description:
- A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. This attribute configures custom http
error messages returned to the user.
Each custom error response object comprises the attributes
I(error_code)
            I(response_page_path)
I(response_code)
I(error_caching_min_ttl)
purge_custom_error_responses:
description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses)
default: false
comment:
description:
- A comment that describes the cloudfront distribution. If not specified, it defaults to a
generic message that it has been created with Ansible, and a datetime stamp.
logging:
description:
- A config element that is a complex object that defines logging for the distribution.
The logging object comprises the attributes
I(enabled)
I(include_cookies)
I(bucket)
I(prefix)
price_class:
description:
- A string that specifies the pricing class of the distribution. As per
U(https://aws.amazon.com/cloudfront/pricing/)
I(price_class=PriceClass_100) consists of the areas
United States
Canada
Europe
I(price_class=PriceClass_200) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
I(price_class=PriceClass_All) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
South America
Australia
choices: ['PriceClass_100', 'PriceClass_200', 'PriceClass_All']
default: aws defaults this to 'PriceClass_All'
enabled:
description:
- A boolean value that specifies whether the distribution is enabled or disabled.
default: 'yes'
choices: ['yes', 'no']
viewer_certificate:
description:
- A config element that is a complex object that specifies the encryption details of the distribution.
Comprises the following attributes
I(cloudfront_default_certificate)
I(iam_certificate_id)
I(acm_certificate_arn)
I(ssl_support_method)
I(minimum_protocol_version)
I(certificate)
I(certificate_source)
restrictions:
description:
        - A config element that is a complex object that describes how a distribution should restrict its content.
The restriction object comprises the following attributes
I(geo_restriction)
I(restriction_type)
I(items[])
web_acl_id:
description:
- The id of a Web Application Firewall (WAF) Access Control List (ACL).
http_version:
description:
- The version of the http protocol to use for the distribution.
choices: [ 'http1.1', 'http2' ]
default: aws defaults this to 'http2'
ipv6_enabled:
description:
- Determines whether IPv6 support is enabled or not.
choices: ['yes', 'no']
default: 'no'
wait:
description:
- Specifies whether the module waits until the distribution has completed processing the creation or update.
choices: ['yes', 'no']
default: 'no'
wait_timeout:
description:
- Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. Defaults to 1800 seconds (30 minutes).
default: 1800
'''
EXAMPLES = '''
# create a basic distribution with defaults and tags
- cloudfront_distribution:
state: present
default_origin_domain_name: www.my-cloudfront-origin.com
tags:
Name: example distribution
Project: example project
Priority: '1'
# update a distribution comment by distribution_id
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by ansible cloudfront.py
# update a distribution comment by caller_reference
- cloudfront_distribution:
state: present
caller_reference: my cloudfront distribution 001
comment: modified by ansible cloudfront.py
# update a distribution's aliases and comment using the distribution_id as a reference
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
# update a distribution's aliases and comment using an alias as a reference
- cloudfront_distribution:
state: present
caller_reference: my test distribution
comment: modified by cloudfront.py again
aliases:
- www.my-distribution-source.com
- zzz.aaa.io
# update a distribution's comment and aliases and tags and remove existing tags
- cloudfront_distribution:
state: present
distribution_id: E15BU8SDCGSG57
comment: modified by cloudfront.py again
aliases:
- tested.com
tags:
Project: distribution 1.2
purge_tags: yes
# create a distribution with an origin, logging and default cache behavior
- cloudfront_distribution:
state: present
caller_reference: unique test distribution id
origins:
- id: 'my test origin-000111'
domain_name: www.example.com
origin_path: /production
custom_headers:
- header_name: MyCustomHeaderName
header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
query_string: true
cookies:
forward: all
headers:
- '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
allowed_methods:
items:
- GET
- HEAD
cached_methods:
- GET
- HEAD
logging:
enabled: true
include_cookies: false
bucket: mylogbucket.s3.amazonaws.com
prefix: myprefix/
enabled: false
comment: this is a cloudfront distribution with logging
# delete a distribution
- cloudfront_distribution:
state: absent
caller_reference: replaceable distribution
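# create a distribution with custom error pages, geo restrictions and an ACM certificate
# (illustrative sketch only - the domain, error page path and certificate ARN below are placeholder values,
# not taken from the original examples)
- cloudfront_distribution:
    state: present
    caller_reference: example restricted distribution
    default_origin_domain_name: www.my-cloudfront-origin.com
    custom_error_responses:
      - error_code: 404
        response_page_path: /errors/not_found.html
        response_code: 404
        error_caching_min_ttl: 300
    restrictions:
      geo_restriction:
        restriction_type: whitelist
        items:
          - US
          - CA
    viewer_certificate:
      acm_certificate_arn: arn:aws:acm:us-east-1:123456789012:certificate/example-certificate-id
      ssl_support_method: sni-only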
'''
RETURN = '''
active_trusted_signers:
description: Key pair IDs that CloudFront is aware of for each trusted signer
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are in use
returned: always
type: bool
sample: false
quantity:
description: Number of trusted signers
returned: always
type: int
sample: 1
items:
      description: List of trusted signers
returned: when there are trusted signers
type: list
sample:
- key_pair_id
aliases:
description: Aliases that refer to the distribution
returned: always
type: complex
contains:
items:
description: List of aliases
returned: always
type: list
sample:
- test.example.com
quantity:
description: Number of aliases
returned: always
type: int
sample: 1
arn:
description: Amazon Resource Name of the distribution
returned: always
type: string
sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
cache_behaviors:
description: Cloudfront cache behaviors
returned: always
type: complex
contains:
items:
description: List of cache behaviors
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
          description: Id of the origin referenced by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
quantity:
description: Count of cache behaviors
returned: always
type: int
sample: 1
caller_reference:
description: Idempotency reference given when creating cloudfront distribution
returned: always
type: string
sample: '1484796016700'
comment:
description: Any comments you want to include about the distribution
returned: always
type: string
sample: 'my first cloudfront distribution'
custom_error_responses:
description: Custom error responses to use for error handling
returned: always
type: complex
contains:
items:
description: List of custom error responses
returned: always
type: complex
contains:
error_caching_min_ttl:
          description: Minimum time to cache this error response
returned: always
type: int
sample: 300
error_code:
description: Origin response code that triggers this error response
returned: always
type: int
sample: 500
response_code:
description: Response code to return to the requester
returned: always
type: string
sample: '500'
response_page_path:
description: Path that contains the error page to display
returned: always
type: string
sample: /errors/5xx.html
quantity:
description: Count of custom error response items
returned: always
type: int
sample: 1
default_cache_behavior:
description: Default cache behavior
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
      description: Id of the origin referenced by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
default_root_object:
description: The object that you want CloudFront to request from your origin (for example, index.html)
when a viewer requests the root URL for your distribution
returned: always
type: string
sample: ''
diff:
description: Difference between previous configuration and new configuration
returned: always
type: dict
sample: {}
domain_name:
description: Domain name of cloudfront distribution
returned: always
type: string
sample: d1vz8pzgurxosf.cloudfront.net
enabled:
description: Whether the cloudfront distribution is enabled or not
returned: always
type: bool
sample: true
http_version:
description: Version of HTTP supported by the distribution
returned: always
type: string
sample: http2
id:
description: Cloudfront distribution ID
returned: always
type: string
sample: E123456ABCDEFG
in_progress_invalidation_batches:
description: The number of invalidation batches currently in progress
returned: always
type: int
sample: 0
is_ipv6_enabled:
description: Whether IPv6 is enabled
returned: always
type: bool
sample: true
last_modified_time:
description: Date and time distribution was last modified
returned: always
type: string
sample: '2017-10-13T01:51:12.656000+00:00'
logging:
description: Logging information
returned: always
type: complex
contains:
bucket:
description: S3 bucket logging destination
returned: always
type: string
sample: logs-example-com.s3.amazonaws.com
enabled:
description: Whether logging is enabled
returned: always
type: bool
sample: true
include_cookies:
description: Whether to log cookies
returned: always
type: bool
sample: false
prefix:
description: Prefix added to logging object names
returned: always
type: string
sample: cloudfront/test
origins:
description: Origins in the cloudfront distribution
returned: always
type: complex
contains:
items:
description: List of origins
returned: always
type: complex
contains:
custom_headers:
description: Custom headers passed to the origin
returned: always
type: complex
contains:
quantity:
description: Count of headers
returned: always
type: int
sample: 1
custom_origin_config:
description: Configuration of the origin
returned: always
type: complex
contains:
http_port:
description: Port on which HTTP is listening
returned: always
type: int
sample: 80
https_port:
description: Port on which HTTPS is listening
returned: always
type: int
sample: 443
origin_keepalive_timeout:
description: Keep-alive timeout
returned: always
type: int
sample: 5
origin_protocol_policy:
description: Policy of which protocols are supported
returned: always
type: string
sample: https-only
origin_read_timeout:
description: Timeout for reads to the origin
returned: always
type: int
sample: 30
origin_ssl_protocols:
description: SSL protocols allowed by the origin
returned: always
type: complex
contains:
items:
description: List of SSL protocols
returned: always
type: list
sample:
- TLSv1
- TLSv1.1
- TLSv1.2
quantity:
description: Count of SSL protocols
returned: always
type: int
sample: 3
domain_name:
description: Domain name of the origin
returned: always
type: string
sample: test-origin.example.com
id:
description: ID of the origin
returned: always
type: string
sample: test-origin.example.com
origin_path:
description: Subdirectory to prefix the request from the S3 or HTTP origin
returned: always
type: string
sample: ''
quantity:
description: Count of origins
returned: always
type: int
sample: 1
price_class:
description: Price class of cloudfront distribution
returned: always
type: string
sample: PriceClass_All
restrictions:
description: Restrictions in use by Cloudfront
returned: always
type: complex
contains:
geo_restriction:
description: Controls the countries in which your content is distributed.
returned: always
type: complex
contains:
quantity:
description: Count of restrictions
returned: always
type: int
sample: 1
items:
description: List of country codes allowed or disallowed
returned: always
type: list
sample: xy
restriction_type:
description: Type of restriction
returned: always
type: string
sample: blacklist
status:
description: Status of the cloudfront distribution
returned: always
type: string
sample: InProgress
tags:
description: Distribution tags
returned: always
type: dict
sample:
Hello: World
viewer_certificate:
description: Certificate used by cloudfront distribution
returned: always
type: complex
contains:
acm_certificate_arn:
description: ARN of ACM certificate
returned: when certificate comes from ACM
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate:
description: Reference to certificate
returned: always
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate_source:
description: Where certificate comes from
returned: always
type: string
sample: acm
minimum_protocol_version:
description: Minimum SSL/TLS protocol supported by this distribution
returned: always
type: string
sample: TLSv1
ssl_support_method:
description: Support for pre-SNI browsers or not
returned: always
type: string
sample: sni-only
web_acl_id:
description: ID of Web Access Control List (from WAF service)
returned: always
type: string
sample: abcd1234-1234-abcd-abcd-abcd12345678
'''
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, compare_aws_tags
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, boto3_tag_list_to_ansible_dict
import datetime
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
try:
import botocore
except ImportError:
pass
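# Rename a dict key in place, keeping its value (used to map option names onto the exact keys boto3 expects).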
def change_dict_key_name(dictionary, old_key, new_key):
if old_key in dictionary:
dictionary[new_key] = dictionary.get(old_key)
dictionary.pop(old_key, None)
return dictionary
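# Merge a validated dict or list back into config under node_name so that validated values
# override the existing entries without discarding unrelated keys.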
def merge_validation_into_config(config, validated_node, node_name):
if validated_node is not None:
if isinstance(validated_node, dict):
config_node = config.get(node_name)
if config_node is not None:
config_node_items = list(config_node.items())
else:
config_node_items = []
config[node_name] = dict(config_node_items + list(validated_node.items()))
if isinstance(validated_node, list):
config[node_name] = list(set(config.get(node_name) + validated_node))
return config
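# Wrap a plain list in the {'quantity': N, 'items': [...]} structure the CloudFront API expects,
# e.g. ['GET', 'HEAD'] becomes {'quantity': 2, 'items': ['GET', 'HEAD']}.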
def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
if list_items is None:
list_items = []
if not isinstance(list_items, list):
raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
result = {}
if include_quantity:
result['quantity'] = len(list_items)
if len(list_items) > 0:
result['items'] = list_items
return result
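# Return a (left, right) pair of nested dicts holding only the keys whose values differ
# between dict1 and dict2, or None when the two dicts are equal.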
def recursive_diff(dict1, dict2):
left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
for k in (set(dict1.keys()) & set(dict2.keys())):
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
result = recursive_diff(dict1[k], dict2[k])
if result:
left[k] = result[0]
right[k] = result[1]
elif dict1[k] != dict2[k]:
left[k] = dict1[k]
right[k] = dict2[k]
if left or right:
return left, right
else:
return None
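# Thin wrappers around the CloudFront API calls; each converts botocore errors into module failures.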
def create_distribution(client, module, config, tags):
try:
if not tags:
return client.create_distribution(DistributionConfig=config)['Distribution']
else:
distribution_config_with_tags = {
'DistributionConfig': config,
'Tags': {
'Items': tags
}
}
return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error creating distribution")
def delete_distribution(client, module, distribution):
try:
return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
def update_distribution(client, module, config, distribution_id, e_tag):
try:
return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
def tag_resource(client, module, arn, tags):
try:
return client.tag_resource(Resource=arn, Tags=dict(Items=tags))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error tagging resource")
def untag_resource(client, module, arn, tag_keys):
try:
return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error untagging resource")
def list_tags_for_resource(client, module, arn):
try:
response = client.list_tags_for_resource(Resource=arn)
return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error listing tags for resource")
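# Reconcile the distribution's existing tags with the requested tags and report whether anything changed.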
def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
changed = False
to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
if to_remove:
untag_resource(client, module, arn, to_remove)
changed = True
if to_add:
tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
changed = True
return changed
class CloudFrontValidationManager(object):
"""
Manages Cloudfront validations
"""
def __init__(self, module):
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
self.module = module
self.__default_distribution_enabled = True
self.__default_http_port = 80
self.__default_https_port = 443
self.__default_ipv6_enabled = False
self.__default_origin_ssl_protocols = [
'TLSv1',
'TLSv1.1',
'TLSv1.2'
]
self.__default_custom_origin_protocol_policy = 'match-viewer'
self.__default_custom_origin_read_timeout = 30
self.__default_custom_origin_keepalive_timeout = 5
self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
self.__default_cache_behavior_min_ttl = 0
self.__default_cache_behavior_max_ttl = 31536000
self.__default_cache_behavior_default_ttl = 86400
self.__default_cache_behavior_compress = False
self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
self.__default_cache_behavior_smooth_streaming = False
self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
self.__default_cache_behavior_forwarded_values_query_string = True
self.__default_trusted_signers_enabled = False
self.__valid_price_classes = set([
'PriceClass_100',
'PriceClass_200',
'PriceClass_All'
])
self.__valid_origin_protocol_policies = set([
'http-only',
'match-viewer',
'https-only'
])
self.__valid_origin_ssl_protocols = set([
'SSLv3',
'TLSv1',
'TLSv1.1',
'TLSv1.2'
])
self.__valid_cookie_forwarding = set([
'none',
'whitelist',
'all'
])
self.__valid_viewer_protocol_policies = set([
'allow-all',
'https-only',
'redirect-to-https'
])
self.__valid_methods = set([
'GET',
'HEAD',
'POST',
'PUT',
'PATCH',
'OPTIONS',
'DELETE'
])
self.__valid_methods_cached_methods = [
set([
'GET',
'HEAD'
]),
set([
'GET',
'HEAD',
'OPTIONS'
])
]
self.__valid_methods_allowed_methods = [
self.__valid_methods_cached_methods[0],
self.__valid_methods_cached_methods[1],
self.__valid_methods
]
self.__valid_lambda_function_association_event_types = set([
'viewer-request',
'viewer-response',
'origin-request',
'origin-response'
])
self.__valid_viewer_certificate_ssl_support_methods = set([
'sni-only',
'vip'
])
self.__valid_viewer_certificate_minimum_protocol_versions = set([
'SSLv3',
'TLSv1'
])
self.__valid_viewer_certificate_certificate_sources = set([
'cloudfront',
'iam',
'acm'
])
self.__valid_http_versions = set([
'http1.1',
'http2'
])
self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
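    # The helpers below merge user-supplied values with the existing configuration and the defaults above,
    # renaming keys where the CloudFront API expects a particular spelling (e.g. min_ttl -> min_t_t_l -> MinTTL).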
def add_missing_key(self, dict_object, key_to_set, value_to_set):
if key_to_set not in dict_object and value_to_set is not None:
dict_object[key_to_set] = value_to_set
return dict_object
def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
if old_key not in dict_object and value_to_set is not None:
dict_object[new_key] = value_to_set
else:
dict_object = change_dict_key_name(dict_object, old_key, new_key)
return dict_object
def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
if key_name in dict_object:
self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
else:
if to_aws_list:
dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
elif value_to_set is not None:
dict_object[key_name] = value_to_set
return dict_object
def validate_logging(self, logging):
try:
if logging is None:
return None
valid_logging = {}
if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
valid_logging['include_cookies'] = logging.get('include_cookies')
valid_logging['enabled'] = logging.get('enabled')
valid_logging['bucket'] = logging.get('bucket')
valid_logging['prefix'] = logging.get('prefix')
return valid_logging
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution logging")
def validate_is_list(self, list_to_validate, list_name):
if not isinstance(list_to_validate, list):
self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
def validate_required_key(self, key_name, full_key_name, dict_object):
if key_name not in dict_object:
self.module.fail_json(msg="%s must be specified." % full_key_name)
def validate_origins(self, client, config, origins, default_origin_domain_name,
default_origin_path, create_distribution, purge_origins=False):
try:
if origins is None:
if default_origin_domain_name is None and not create_distribution:
if purge_origins:
return None
else:
return ansible_list_to_cloudfront_list(config)
if default_origin_domain_name is not None:
origins = [{
'domain_name': default_origin_domain_name,
'origin_path': default_origin_path or ''
}]
else:
origins = []
self.validate_is_list(origins, 'origins')
if not origins and default_origin_domain_name is None and create_distribution:
                self.module.fail_json(msg="Neither origins[] nor default_origin_domain_name has been specified. Please specify at least one.")
all_origins = OrderedDict()
new_domains = list()
for origin in config:
all_origins[origin.get('domain_name')] = origin
for origin in origins:
origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
all_origins[origin['domain_name']] = origin
new_domains.append(origin['domain_name'])
if purge_origins:
                for domain in list(all_origins.keys()):
if domain not in new_domains:
del(all_origins[domain])
            return ansible_list_to_cloudfront_list(list(all_origins.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution origins")
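    # Reuse the origin access identity already attached to an S3 origin when one exists,
    # otherwise create a new CloudFront origin access identity for it.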
def validate_s3_origin_configuration(self, client, existing_config, origin):
if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
return existing_config['s3_origin_config']['origin_access_identity']
if not origin['s3_origin_access_identity_enabled']:
return None
try:
comment = "Origin Access Identity created by Ansible at %s" % self.__default_datetime_string
cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=self.__default_datetime_string,
Comment=comment))
oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
except Exception as e:
self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
return "origin-access-identity/cloudfront/%s" % oai
def validate_origin(self, client, existing_config, origin, default_origin_path):
try:
origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
self.validate_required_key('origin_path', 'origins[].origin_path', origin)
origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
for custom_header in origin.get('custom_headers'):
if 'header_name' not in custom_header or 'header_value' not in custom_header:
self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
else:
origin['custom_headers'] = ansible_list_to_cloudfront_list()
if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
if origin.get("s3_origin_access_identity_enabled") is not None:
s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
if s3_origin_config:
oai = s3_origin_config
else:
oai = ""
origin["s3_origin_config"] = dict(origin_access_identity=oai)
del(origin["s3_origin_access_identity_enabled"])
if 'custom_origin_config' in origin:
self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
else:
origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
custom_origin_config = origin.get('custom_origin_config')
custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
'origins[].custom_origin_config.origin_protocol_policy',
self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
if custom_origin_config.get('origin_ssl_protocols'):
self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
self.__valid_origin_ssl_protocols)
else:
custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
return origin
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating distribution origin")
def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
try:
if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
return ansible_list_to_cloudfront_list(config)
all_cache_behaviors = OrderedDict()
# cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors
# is true (if purge_cache_behaviors is not true, we can't really know the full new order)
if not purge_cache_behaviors:
for behavior in config:
all_cache_behaviors[behavior['path_pattern']] = behavior
for cache_behavior in cache_behaviors:
valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
cache_behavior, valid_origins)
all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
if purge_cache_behaviors:
for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
del(all_cache_behaviors[target_origin_id])
            return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
if is_default_cache and cache_behavior is None:
cache_behavior = {}
if cache_behavior is None and valid_origins is not None:
return config
cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
try:
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
if not target_origin_id:
target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
if is_default_cache:
cache_behavior_name = 'Default cache behavior'
else:
cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
cache_behavior_name)
cache_behavior['target_origin_id'] = target_origin_id
cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
config.get('viewer_protocol_policy',
self.__default_cache_behavior_viewer_protocol_policy),
self.__valid_viewer_protocol_policies)
cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
try:
if not forwarded_values:
forwarded_values = dict()
existing_config = config.get('forwarded_values', {})
headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
if 'cookies' not in forwarded_values:
forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
forwarded_values['cookies'] = {'forward': forward}
else:
existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
if whitelist:
self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
self.__valid_cookie_forwarding)
forwarded_values['cookies']['forward'] = cookie_forwarding
query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
cache_behavior['forwarded_values'] = forwarded_values
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating forwarded values")
def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
try:
if lambda_function_associations is not None:
self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
for association in lambda_function_associations:
association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
self.__valid_lambda_function_association_event_types)
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
else:
if 'lambda_function_associations' in config:
cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
else:
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating lambda function associations")
def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
try:
if allowed_methods is not None:
self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
temp_allowed_items = allowed_methods.get('items')
self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
self.__valid_methods_allowed_methods)
cached_items = allowed_methods.get('cached_methods')
if 'cached_methods' in allowed_methods:
self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]',
self.__valid_methods_cached_methods)
# we don't care if the order of how cloudfront stores the methods differs - preserving existing
# order reduces likelihood of making unnecessary changes
if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
cache_behavior['allowed_methods'] = config['allowed_methods']
else:
cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
else:
cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
else:
if 'allowed_methods' in config:
cache_behavior['allowed_methods'] = config.get('allowed_methods')
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating allowed methods")
def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
try:
if trusted_signers is None:
trusted_signers = {}
if 'items' in trusted_signers:
valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
else:
valid_trusted_signers = dict(quantity=config.get('quantity', 0))
if 'items' in config:
valid_trusted_signers = dict(items=config['items'])
valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
cache_behavior['trusted_signers'] = valid_trusted_signers
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating trusted signers")
def validate_viewer_certificate(self, viewer_certificate):
try:
if viewer_certificate is None:
return None
if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
"_certificate set to true.")
self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
self.__valid_viewer_certificate_ssl_support_methods)
self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
self.__valid_viewer_certificate_minimum_protocol_versions)
self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
self.__valid_viewer_certificate_certificate_sources)
viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
return viewer_certificate
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating viewer certificate")
def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
try:
if custom_error_responses is None and not purge_custom_error_responses:
return ansible_list_to_cloudfront_list(config)
self.validate_is_list(custom_error_responses, 'custom_error_responses')
result = list()
            existing_responses = dict((response['error_code'], response) for response in config)
for custom_error_response in custom_error_responses:
self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
if 'response_code' in custom_error_response:
custom_error_response['response_code'] = str(custom_error_response['response_code'])
if custom_error_response['error_code'] in existing_responses:
del(existing_responses[custom_error_response['error_code']])
result.append(custom_error_response)
if not purge_custom_error_responses:
result.extend(existing_responses.values())
return ansible_list_to_cloudfront_list(result)
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating custom error responses")
def validate_restrictions(self, config, restrictions, purge_restrictions=False):
try:
if restrictions is None:
if purge_restrictions:
return None
else:
return config
self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
geo_restriction = restrictions.get('geo_restriction')
self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
geo_restriction_items = geo_restriction.get('items')
if not purge_restrictions:
geo_restriction_items.extend([rest for rest in existing_restrictions if
rest not in geo_restriction_items])
valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
return valid_restrictions
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating restrictions")
def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
try:
config['default_root_object'] = default_root_object or config.get('default_root_object', '')
config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled)
if http_version is not None or config.get('http_version'):
self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
config['http_version'] = http_version or config.get('http_version')
if web_acl_id or config.get('web_a_c_l_id'):
config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
try:
if config is None:
config = {}
if aliases is not None:
if not purge_aliases:
aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
if alias not in aliases])
config['aliases'] = ansible_list_to_cloudfront_list(aliases)
if logging is not None:
config['logging'] = self.validate_logging(logging)
config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
if price_class is not None:
self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
config['price_class'] = price_class
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
def validate_comment(self, config, comment):
config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
return config
def validate_caller_reference(self, caller_reference):
return caller_reference or self.__default_datetime_string
def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
try:
if valid_origins is not None:
valid_origins_list = valid_origins.get('items')
if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
return str(valid_origins_list[0].get('id'))
self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
except Exception as e:
self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
try:
self.validate_is_list(attribute_list, attribute_list_name)
if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
if attribute is not None and attribute not in allowed_list:
self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
def validate_distribution_from_caller_reference(self, caller_reference):
try:
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
distribution_name = 'Distribution'
distribution_config_name = 'DistributionConfig'
distribution_ids = [dist.get('Id') for dist in distributions]
for distribution_id in distribution_ids:
config = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
distribution = config.get(distribution_name)
if distribution is not None:
distribution_config = distribution.get(distribution_config_name)
if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
distribution['DistributionConfig'] = distribution_config
return distribution
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
try:
if caller_reference is not None:
return self.validate_distribution_from_caller_reference(caller_reference)
else:
if aliases:
distribution_id = self.validate_distribution_id_from_alias(aliases)
if distribution_id:
return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
return None
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
def validate_distribution_id_from_alias(self, aliases):
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
if distributions:
for distribution in distributions:
distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
if set(aliases) & set(distribution_aliases):
return distribution['Id']
return None
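    # Block until the distribution reaches the 'Deployed' state, polling roughly once a minute
    # until wait_timeout seconds have elapsed.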
def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
if distribution_id is None:
            distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id']
try:
waiter = client.get_waiter('distribution_deployed')
attempts = 1 + int(wait_timeout / 60)
waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
except botocore.exceptions.WaiterError as e:
self.module.fail_json(msg="Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. "
"Error: {1}".format(to_text(wait_timeout), to_native(e)))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
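# Entry point: look up any existing distribution (by distribution_id, alias or caller_reference),
# then create, update or delete it depending on 'state' and whether a matching distribution was found.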
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
caller_reference=dict(),
comment=dict(),
distribution_id=dict(),
e_tag=dict(),
tags=dict(type='dict', default={}),
purge_tags=dict(type='bool', default=False),
alias=dict(),
aliases=dict(type='list', default=[]),
purge_aliases=dict(type='bool', default=False),
default_root_object=dict(),
origins=dict(type='list'),
purge_origins=dict(type='bool', default=False),
default_cache_behavior=dict(type='dict'),
cache_behaviors=dict(type='list'),
purge_cache_behaviors=dict(type='bool', default=False),
custom_error_responses=dict(type='list'),
purge_custom_error_responses=dict(type='bool', default=False),
logging=dict(type='dict'),
price_class=dict(),
enabled=dict(type='bool'),
viewer_certificate=dict(type='dict'),
restrictions=dict(type='dict'),
web_acl_id=dict(),
http_version=dict(),
ipv6_enabled=dict(type='bool'),
default_origin_domain_name=dict(),
default_origin_path=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=1800, type='int')
))
result = {}
changed = True
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=False,
mutually_exclusive=[
['distribution_id', 'alias'],
['default_origin_domain_name', 'distribution_id'],
['default_origin_domain_name', 'alias'],
]
)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs)
validation_mgr = CloudFrontValidationManager(module)
state = module.params.get('state')
caller_reference = module.params.get('caller_reference')
comment = module.params.get('comment')
e_tag = module.params.get('e_tag')
tags = module.params.get('tags')
purge_tags = module.params.get('purge_tags')
distribution_id = module.params.get('distribution_id')
alias = module.params.get('alias')
aliases = module.params.get('aliases')
purge_aliases = module.params.get('purge_aliases')
default_root_object = module.params.get('default_root_object')
origins = module.params.get('origins')
purge_origins = module.params.get('purge_origins')
default_cache_behavior = module.params.get('default_cache_behavior')
cache_behaviors = module.params.get('cache_behaviors')
purge_cache_behaviors = module.params.get('purge_cache_behaviors')
custom_error_responses = module.params.get('custom_error_responses')
purge_custom_error_responses = module.params.get('purge_custom_error_responses')
logging = module.params.get('logging')
price_class = module.params.get('price_class')
enabled = module.params.get('enabled')
viewer_certificate = module.params.get('viewer_certificate')
restrictions = module.params.get('restrictions')
purge_restrictions = module.params.get('purge_restrictions')
web_acl_id = module.params.get('web_acl_id')
http_version = module.params.get('http_version')
ipv6_enabled = module.params.get('ipv6_enabled')
default_origin_domain_name = module.params.get('default_origin_domain_name')
default_origin_path = module.params.get('default_origin_path')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
if alias and alias not in aliases:
aliases.append(alias)
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
update = state == 'present' and distribution
create = state == 'present' and not distribution
delete = state == 'absent' and distribution
if not (update or create or delete):
module.exit_json(changed=False)
if update or delete:
config = distribution['Distribution']['DistributionConfig']
e_tag = distribution['ETag']
distribution_id = distribution['Distribution']['Id']
else:
config = dict()
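    # The existing AWS config uses CamelCase keys; convert it to snake_case (reversibly) so it can be
    # merged with the snake_case module parameters before being converted back below.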
if update:
config = camel_dict_to_snake_dict(config, reversible=True)
if create or update:
config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
default_origin_path, create, purge_origins)
config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
cache_behaviors, config['origins'], purge_cache_behaviors)
config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
default_cache_behavior, config['origins'], True)
config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
custom_error_responses, purge_custom_error_responses)
valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
if valid_restrictions:
config['restrictions'] = valid_restrictions
valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
config = validation_mgr.validate_comment(config, comment)
config = snake_dict_to_camel_dict(config, capitalize_first=True)
if create:
config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
result = camel_dict_to_snake_dict(result)
result['tags'] = list_tags_for_resource(client, module, result['arn'])
if delete:
if config['Enabled']:
config['Enabled'] = False
result = update_distribution(client, module, config, distribution_id, e_tag)
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
# e_tag = distribution['ETag']
result = delete_distribution(client, module, distribution)
if update:
changed = config != distribution['Distribution']['DistributionConfig']
if changed:
result = update_distribution(client, module, config, distribution_id, e_tag)
else:
result = distribution['Distribution']
existing_tags = list_tags_for_resource(client, module, result['ARN'])
distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
result = camel_dict_to_snake_dict(result)
result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
result['diff'] = dict()
diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
if diff:
result['diff']['before'] = diff[0]
result['diff']['after'] = diff[1]
if wait and (create or update):
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
if 'distribution_config' in result:
result.update(result['distribution_config'])
del(result['distribution_config'])
module.exit_json(changed=changed, **result)
if __name__ == '__main__':
main()
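# --- Editor's note: usage sketch (not part of the original module). ---
# A minimal, hedged playbook task for this module; the module name
# 'cloudfront_distribution' and every value below are assumptions made for
# illustration only.
#
#   - name: ensure a CloudFront distribution exists
#     cloudfront_distribution:
#       state: present
#       default_origin_domain_name: example-bucket.s3.amazonaws.com
#       aliases: ['cdn.example.com']
#       comment: managed by Ansible
#       enabled: yes
#       wait: yes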
|
gpl-3.0
|
sekikn/incubator-airflow
|
tests/providers/amazon/aws/operators/test_emr_terminate_job_flow.py
|
10
|
1847
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import MagicMock, patch
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator
TERMINATE_SUCCESS_RETURN = {'ResponseMetadata': {'HTTPStatusCode': 200}}
class TestEmrTerminateJobFlowOperator(unittest.TestCase):
def setUp(self):
# Mock out the emr_client (moto has incorrect response)
mock_emr_client = MagicMock()
mock_emr_client.terminate_job_flows.return_value = TERMINATE_SUCCESS_RETURN
mock_emr_session = MagicMock()
mock_emr_session.client.return_value = mock_emr_client
# Mock out the emr_client creator
self.boto3_session_mock = MagicMock(return_value=mock_emr_session)
def test_execute_terminates_the_job_flow_and_does_not_error(self):
with patch('boto3.session.Session', self.boto3_session_mock):
operator = EmrTerminateJobFlowOperator(
task_id='test_task', job_flow_id='j-8989898989', aws_conn_id='aws_default'
)
operator.execute(None)
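# --- Editor's note (not part of the original test): execute() is only checked
# for not raising. A hedged sketch of a stronger assertion, assuming the mocked
# EMR client were also stored on self in setUp (it currently is not) and that
# the operator forwards the job flow id as a JobFlowIds list:
#
#   self.emr_client_mock.terminate_job_flows.assert_called_once_with(
#       JobFlowIds=['j-8989898989'])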
|
apache-2.0
|
jdkoreclipse/android_kernel_htc_msm8960
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
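# --- Editor's note: typical invocation (not part of the original script). ---
# Bundled perf python scripts are normally driven through perf's record/report
# wrappers; the exact commands below are an assumption based on that
# convention:
#
#   perf script record futex-contention    # trace the futex syscall events
#   perf script report futex-contention    # run this script over the trace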
|
gpl-2.0
|
deepmind/deepmind-research
|
ogb_lsc/mag/losses.py
|
1
|
6654
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses and related utilities."""
from typing import Mapping, Tuple, Sequence, NamedTuple, Dict, Optional
import jax
import jax.numpy as jnp
import jraph
import numpy as np
# pylint: disable=g-bad-import-order
import datasets
LogsDict = Mapping[str, jnp.ndarray]
class Predictions(NamedTuple):
node_indices: np.ndarray
labels: np.ndarray
predictions: np.ndarray
logits: np.ndarray
def node_classification_loss(
logits: jnp.ndarray,
batch: datasets.Batch,
extra_stats: bool = False,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Gets node-wise classification loss and statistics."""
log_probs = jax.nn.log_softmax(logits)
loss = -jnp.sum(log_probs * batch.node_labels, axis=-1)
num_valid = jnp.sum(batch.label_mask)
labels = jnp.argmax(batch.node_labels, axis=-1)
is_correct = (jnp.argmax(log_probs, axis=-1) == labels)
num_correct = jnp.sum(is_correct * batch.label_mask)
loss = jnp.sum(loss * batch.label_mask) / (num_valid + 1e-8)
accuracy = num_correct / (num_valid + 1e-8)
entropy = -jnp.mean(jnp.sum(jax.nn.softmax(logits) * log_probs, axis=-1))
stats = {
'classification_loss': loss,
'prediction_entropy': entropy,
'accuracy': accuracy,
'num_valid': num_valid,
'num_correct': num_correct,
}
if extra_stats:
for k in range(1, 6):
stats[f'top_{k}_correct'] = topk_correct(logits, labels,
batch.label_mask, k)
return loss, stats
def get_predictions_labels_and_logits(
logits: jnp.ndarray,
batch: datasets.Batch,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Gets prediction labels and logits."""
mask = batch.label_mask > 0.
indices = batch.node_indices[mask]
logits = logits[mask]
predictions = jnp.argmax(logits, axis=-1)
labels = jnp.argmax(batch.node_labels[mask], axis=-1)
return indices, predictions, labels, logits
def topk_correct(
logits: jnp.ndarray,
labels: jnp.ndarray,
valid_mask: jnp.ndarray,
topk: int,
) -> jnp.ndarray:
"""Calculates top-k accuracy."""
pred_ranking = jnp.argsort(logits, axis=1)[:, ::-1]
pred_ranking = pred_ranking[:, :topk]
is_correct = jnp.any(pred_ranking == labels[:, jnp.newaxis], axis=1)
return (is_correct * valid_mask).sum()
def ensemble_predictions_by_probability_average(
predictions_list: Sequence[Predictions]) -> Predictions:
"""Ensemble predictions by ensembling the probabilities."""
_assert_consistent_predictions(predictions_list)
all_probs = np.stack([
jax.nn.softmax(predictions.logits, axis=-1)
for predictions in predictions_list
],
axis=0)
ensembled_logits = np.log(all_probs.mean(0))
return predictions_list[0]._replace(
logits=ensembled_logits, predictions=np.argmax(ensembled_logits, axis=-1))
def get_accuracy_dict(predictions: Predictions) -> Dict[str, float]:
"""Returns the accuracy dict."""
output_dict = {}
output_dict['num_valid'] = predictions.predictions.shape[0]
matches = (predictions.labels == predictions.predictions)
output_dict['accuracy'] = matches.mean()
pred_ranking = jnp.argsort(predictions.logits, axis=1)[:, ::-1]
for k in range(1, 6):
matches = jnp.any(
pred_ranking[:, :k] == predictions.labels[:, None], axis=1)
output_dict[f'top_{k}_correct'] = matches.mean()
return output_dict
def bgrl_loss(
first_online_predictions: jnp.ndarray,
second_target_projections: jnp.ndarray,
second_online_predictions: jnp.ndarray,
first_target_projections: jnp.ndarray,
symmetrize: bool,
valid_mask: jnp.ndarray,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Implements BGRL loss."""
first_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(first_online_predictions, axis=-1) -
_l2_normalize(second_target_projections, axis=-1)),
axis=-1)
if symmetrize:
second_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(second_online_predictions, axis=-1) -
_l2_normalize(first_target_projections, axis=-1)),
axis=-1)
node_loss = first_side_node_loss + second_side_node_loss
else:
node_loss = first_side_node_loss
loss = (node_loss * valid_mask).sum() / (valid_mask.sum() + 1e-6)
return loss, dict(bgrl_loss=loss)
def get_corrupted_view(
graph: jraph.GraphsTuple,
feature_drop_prob: float,
edge_drop_prob: float,
rng_key: jnp.ndarray,
) -> jraph.GraphsTuple:
"""Returns corrupted graph view."""
node_key, edge_key = jax.random.split(rng_key)
def mask_feature(x):
mask = jax.random.bernoulli(node_key, 1 - feature_drop_prob, x.shape)
return x * mask
# Randomly mask features with fixed probability.
nodes = jax.tree_map(mask_feature, graph.nodes)
# Simulate dropping of edges by changing genuine edges to self-loops on
# the padded node.
num_edges = graph.senders.shape[0]
last_node_idx = graph.n_node.sum() - 1
edge_mask = jax.random.bernoulli(edge_key, 1 - edge_drop_prob, [num_edges])
senders = jnp.where(edge_mask, graph.senders, last_node_idx)
receivers = jnp.where(edge_mask, graph.receivers, last_node_idx)
# Note that n_edge will now be invalid since edges in the middle of the list
# will correspond to the final graph. Set n_edge to None to ensure we do not
# accidentally use this.
return graph._replace(
nodes=nodes,
senders=senders,
receivers=receivers,
n_edge=None,
)
def _assert_consistent_predictions(predictions_list: Sequence[Predictions]):
first_predictions = predictions_list[0]
for predictions in predictions_list:
assert np.all(predictions.node_indices == first_predictions.node_indices)
assert np.all(predictions.labels == first_predictions.labels)
assert np.all(
predictions.predictions == np.argmax(predictions.logits, axis=-1))
def _l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-6,
) -> jnp.ndarray:
return x * jax.lax.rsqrt(
jnp.sum(jnp.square(x), axis=axis, keepdims=True) + epsilon)
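# --- Editor's note: minimal usage sketch (not part of the original module). ---
# The stand-in batch below only carries the fields that
# node_classification_loss actually reads; the real datasets.Batch is richer,
# so treat this purely as an illustration of the expected shapes.
if __name__ == '__main__':
  from types import SimpleNamespace
  rng = np.random.RandomState(0)
  num_nodes, num_classes = 8, 5
  one_hot = np.eye(num_classes)[rng.randint(num_classes, size=num_nodes)]
  fake_batch = SimpleNamespace(
      node_labels=jnp.asarray(one_hot),
      label_mask=jnp.ones([num_nodes]),
      node_indices=jnp.arange(num_nodes))
  logits = jnp.asarray(rng.randn(num_nodes, num_classes))
  loss, stats = node_classification_loss(logits, fake_batch)
  print('loss:', float(loss), 'accuracy:', float(stats['accuracy']))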
|
apache-2.0
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_import.py
|
12
|
40219
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import unittest.mock as mock
import textwrap
import errno
import shutil
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE)
from test import script_helper
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyo",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with script_helper.temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
pyo = TESTFN + ".pyo"
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc/.pyo files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
if __debug__:
bytecode_only = path + "c"
else:
bytecode_only = path + "o"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(12)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from . import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147 related behaviors.
tag = sys.implementation.cache_tag
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertTrue(os.path.exists(os.path.join(
'__pycache__', '{}.{}.py{}'.format(
TESTFN, self.tag, 'c' if __debug__ else 'o'))))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertFalse(os.path.exists(os.path.join(
'__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.FileFinder, mod.FileFinder)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap._get_sourcefile() as used by the C API.
    Because of the peculiar way this function needs to be used, the tests are
    knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = True;
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = False;
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
gpl-2.0
|
kenshay/ImageScripter
|
Script_Runner/PYTHON/Lib/lib2to3/fixer_base.py
|
41
|
6690
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Base class for fixers (optional, but recommended)."""
# Python imports
import itertools
# Local imports
from .patcomp import PatternCompiler
from . import pygram
from .fixer_util import does_tree_import
class BaseFix(object):
"""Optional base class for fixers.
The subclass name must be FixFooBar where FooBar is the result of
removing underscores and capitalizing the words of the fix name.
For example, the class name for a fixer named 'has_key' should be
FixHasKey.
"""
PATTERN = None # Most subclasses should override with a string literal
pattern = None # Compiled pattern, set by compile_pattern()
pattern_tree = None # Tree representation of the pattern
options = None # Options object passed to initializer
filename = None # The filename (set by set_filename)
numbers = itertools.count(1) # For new_name()
used_names = set() # A set of all used NAMEs
order = "post" # Does the fixer prefer pre- or post-order traversal
explicit = False # Is this ignored by refactor.py -f all?
run_order = 5 # Fixers will be sorted by run order before execution
# Lower numbers will be run first.
_accept_type = None # [Advanced and not public] This tells RefactoringTool
# which node type to accept when there's not a pattern.
keep_line_order = False # For the bottom matcher: match with the
# original line order
BM_compatible = False # Compatibility with the bottom matching
# module; every fixer should set this
# manually
# Shortcut for access to Python grammar symbols
syms = pygram.python_symbols
def __init__(self, options, log):
"""Initializer. Subclass may override.
Args:
options: a dict containing the options passed to RefactoringTool
that could be used to customize the fixer through the command line.
log: a list to append warnings and other messages to.
"""
self.options = options
self.log = log
self.compile_pattern()
def compile_pattern(self):
"""Compiles self.PATTERN into self.pattern.
Subclass may override if it doesn't want to use
self.{pattern,PATTERN} in .match().
"""
if self.PATTERN is not None:
PC = PatternCompiler()
self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
with_tree=True)
def set_filename(self, filename):
"""Set the filename.
The main refactoring tool should call this.
"""
self.filename = filename
def match(self, node):
"""Returns match for a given parse tree node.
Should return a true or false object (not necessarily a bool).
It may return a non-empty dict of matching sub-nodes as
returned by a matching pattern.
Subclass may override.
"""
results = {"node": node}
return self.pattern.match(node, results) and results
def transform(self, node, results):
"""Returns the transformation for a given parse tree node.
Args:
node: the root of the parse tree that matched the fixer.
results: a dict mapping symbolic names to part of the match.
Returns:
None, or a node that is a modified copy of the
argument node. The node argument may also be modified in-place to
effect the same change.
Subclass *must* override.
"""
raise NotImplementedError()
def new_name(self, template="xxx_todo_changeme"):
"""Return a string suitable for use as an identifier
The new name is guaranteed not to conflict with other identifiers.
"""
name = template
while name in self.used_names:
name = template + str(next(self.numbers))
self.used_names.add(name)
return name
def log_message(self, message):
if self.first_log:
self.first_log = False
self.log.append("### In file %s ###" % self.filename)
self.log.append(message)
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.prefix = ""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason)
def warning(self, node, reason):
"""Used for warning the user about possible uncertainty in the
translation.
        First argument is the top-level node for the code in question.
        Second argument is the reason for the warning.
"""
lineno = node.get_lineno()
self.log_message("Line %d: %s" % (lineno, reason))
def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True
def finish_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the conclusion of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
pass
class ConditionalFix(BaseFix):
""" Base class for fixers which not execute if an import is found. """
# This is the name of the import which, if found, will cause the test to be skipped
skip_on = None
def start_tree(self, *args):
super(ConditionalFix, self).start_tree(*args)
self._should_skip = None
def should_skip(self, node):
if self._should_skip is not None:
return self._should_skip
pkg = self.skip_on.split(".")
name = pkg[-1]
pkg = ".".join(pkg[:-1])
self._should_skip = does_tree_import(pkg, name, node)
return self._should_skip
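# --- Editor's note: minimal fixer sketch (not part of the original module). ---
# Illustrates the naming and transform() contract described in the BaseFix
# docstring above; the pattern and the replacement are illustrative only.
#
#   from lib2to3.fixer_base import BaseFix
#   from lib2to3.fixer_util import Name
#
#   class FixSpam(BaseFix):          # fixer name: 'spam'
#       BM_compatible = True
#       PATTERN = "'spam'"           # match NAME leaves spelled 'spam'
#
#       def transform(self, node, results):
#           return Name('eggs', prefix=node.prefix)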
|
gpl-3.0
|
dhp-denero/LibrERP
|
crm_lead_correct/crm_case_stage.py
|
2
|
1579
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import orm, fields
class crm_case_stage(orm.Model):
_inherit = 'crm.case.stage'
_columns = {
'name': fields.char('Name', size=64, required=True, translate=False),
}
|
agpl-3.0
|
sahiljain/catapult
|
dashboard/dashboard/benchmark_health_report_test.py
|
2
|
5261
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import benchmark_health_report
from dashboard import update_test_suites
from dashboard.common import stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
class BenchmarkHealthReportTest(testing_common.TestCase):
def setUp(self):
super(BenchmarkHealthReportTest, self).setUp()
app = webapp2.WSGIApplication(
[('/benchmark_health_report',
benchmark_health_report.BenchmarkHealthReportHandler)])
self.testapp = webtest.TestApp(app)
def _AddAnomalyEntities(
self, revision_ranges, test_key, bug_id=None):
"""Adds a group of Anomaly entities to the datastore."""
urlsafe_keys = []
for start_rev, end_rev in revision_ranges:
anomaly_key = anomaly.Anomaly(
start_revision=start_rev, end_revision=end_rev,
test=test_key, bug_id=bug_id,
median_before_anomaly=100, median_after_anomaly=200).put()
urlsafe_keys.append(anomaly_key.urlsafe())
return urlsafe_keys
def _AddTests(self):
"""Adds sample TestMetadata entities and returns their keys."""
testing_common.AddTests(['ChromiumPerf'], ['linux'], {
'sunspider': {
'Total': {},
'ref': {},
},
'page_cycler': {
'load_time': {
'cnn.com': {},
'google.com': {},
}
}
})
tests = graph_data.TestMetadata.query()
for test in tests:
test.improvement_direction = anomaly.DOWN
ndb.put_multi(tests)
def _AddCachedSuites(self):
test_suites = {
'sunspider': {
'mas': {'ChromiumPerf': {'mac': False, 'linux': False}},
'mon': [],
},
'page_cycler': {
'mas': {'ChromiumPerf': {'linux': False}, 'CrOS': {'foo': False}},
'mon': ['load_time'],
},
'speedometer': {
'mas': {'CrOS': {'foo': False, 'bar': False}},
'mon': [],
}
}
key = update_test_suites._NamespaceKey(
update_test_suites._LIST_SUITES_CACHE_KEY)
stored_object.Set(key, test_suites)
def testGet(self):
response = self.testapp.get('/benchmark_health_report')
self.assertEqual('text/html', response.content_type)
self.assertIn('Chrome Performance Dashboard', response.body)
def testPost_MasterArgument_ListsTestsForMaster(self):
self._AddCachedSuites()
response = self.testapp.post(
'/benchmark_health_report', {'master': 'CrOS'})
benchmark_list = self.GetJsonValue(response, 'benchmarks')
self.assertItemsEqual(benchmark_list, [{
'name': 'page_cycler',
'monitored': True,
'bots': ['foo'],
}, {
'name': 'speedometer',
'monitored': False,
'bots': ['bar', 'foo'],
}])
def testPost_BenchmarkArgument_ListsAlertsAndBots(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'page_cycler',
'num_days': '30',
'master': 'ChromiumPerf',
})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(3, len(alerts))
def testPost_Benchmark_NotMonitored(self):
self._AddCachedSuites()
self._AddTests()
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'sunspider',
'num_days': '30',
'master': 'ChromiumPerf',
})
self.assertFalse(self.GetJsonValue(response, 'monitored'))
def testPost_BenchmarkArgumentNumDaysArgument_ListsCorrectAlerts(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
anomalies = anomaly.Anomaly.query().fetch()
anomalies[0].timestamp = datetime.datetime.now() - datetime.timedelta(
days=20)
anomalies[0].put()
response = self.testapp.post(
'/benchmark_health_report',
{'benchmark': 'page_cycler', 'num_days': '5', 'master': 'ChromiumPerf'})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(2, len(alerts))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
eayunstack/fuel-web
|
nailgun/nailgun/rpc/__init__.py
|
3
|
2731
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import functools
import amqp.exceptions as amqp_exceptions
from kombu import Connection
from kombu import Exchange
from oslo_serialization import jsonutils
from kombu import Queue
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.rpc import utils
creds = (
("userid", "guest"),
("password", "guest"),
("hostname", "localhost"),
("port", "5672"),
)
conn_str = 'amqp://{0}:{1}@{2}:{3}//'.format(
*[settings.RABBITMQ.get(*cred) for cred in creds]
)
naily_exchange = Exchange(
'naily',
'topic',
durable=True
)
naily_queue = Queue(
'naily',
exchange=naily_exchange,
routing_key='naily'
)
naily_service_exchange = Exchange(
'naily_service',
'fanout',
durable=False,
auto_delete=True
)
naily_service_queue = Queue(
'naily_service',
exchange=naily_service_exchange
)
nailgun_exchange = Exchange(
'nailgun',
'topic',
durable=True
)
nailgun_queue = Queue(
'nailgun',
exchange=nailgun_exchange,
routing_key='nailgun'
)
def cast(name, message, service=False):
logger.debug(
"RPC cast to orchestrator:\n{0}".format(
jsonutils.dumps(message, indent=4)
)
)
use_queue = naily_queue if not service else naily_service_queue
use_exchange = naily_exchange if not service else naily_service_exchange
with Connection(conn_str) as conn:
with conn.Producer(serializer='json') as producer:
publish = functools.partial(producer.publish, message,
exchange=use_exchange, routing_key=name, declare=[use_queue])
try:
publish()
except amqp_exceptions.PreconditionFailed as e:
logger.warning(six.text_type(e))
# (dshulyak) we should drop both exchanges/queues in order
# for astute to be able to recover temporary queues
utils.delete_entities(
conn, naily_service_exchange, naily_service_queue,
naily_exchange, naily_queue)
publish()
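# Illustrative usage sketch (hypothetical payloads; not part of this module).
# cast() publishes a JSON task to the durable 'naily' topic exchange, or to
# the transient 'naily_service' fanout exchange when service=True:
#
#     cast('naily', {'method': 'deploy', 'args': {'task_uuid': 'abc'}})
#     cast('naily_service', {'method': 'ping'}, service=True)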
|
apache-2.0
|
blackzw/openwrt_sdk_dev1
|
staging_dir/host/lib/scons-2.1.0/SCons/Tool/cc.py
|
21
|
3747
|
"""SCons.Tool.cc
Tool-specific initialization for generic Posix C compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cc.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Tool
import SCons.Defaults
import SCons.Util
CSuffixes = ['.c', '.m']
if not SCons.Util.case_sensitive_suffixes('.c', '.C'):
CSuffixes.append('.C')
def add_common_cc_variables(env):
"""
Add underlying common "C compiler" variables that
are used by multiple tools (specifically, c++).
"""
if '_CCCOMCOM' not in env:
env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS'
# It's a hack to test for darwin here, but the alternative
# of creating an applecc.py to contain this seems overkill.
# Maybe someday the Apple platform will require more setup and
# this logic will be moved.
env['FRAMEWORKS'] = SCons.Util.CLVar('')
env['FRAMEWORKPATH'] = SCons.Util.CLVar('')
if env['PLATFORM'] == 'darwin':
env['_CCCOMCOM'] = env['_CCCOMCOM'] + ' $_FRAMEWORKPATH'
if 'CCFLAGS' not in env:
env['CCFLAGS'] = SCons.Util.CLVar('')
if 'SHCCFLAGS' not in env:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
def generate(env):
"""
Add Builders and construction variables for C compilers to an Environment.
"""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
add_common_cc_variables(env)
env['CC'] = 'cc'
env['CFLAGS'] = SCons.Util.CLVar('')
env['CCCOM'] = '$CC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCC'] = '$CC'
env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CFILESUFFIX'] = '.c'
def exists(env):
return env.Detect('cc')
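# Illustrative sketch of how this tool module is typically consumed
# (assumes a standard SCons build; an SConstruct might contain):
#
#     env = Environment(tools=['cc'])    # invokes generate(env) above
#     env.Program('hello', ['hello.c'])  # uses $CC / $CCCOM set by this tool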
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
adhocish/MELEEDB
|
meleedb/main.py
|
1
|
1570
|
import logging
import datetime
import os
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.utils.log import configure_logging
import spiders
def run():
# Logging settings
configure_logging(install_root_handler=False)
logging.basicConfig(
datefmt='%Y-%m-%d %H:%M:%S',
filemode='w',
filename='output/' + datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") + '.log',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO
)
# Project settings
settings = Settings()
settings.setmodule('settings', priority='project')
# Class to run parallel spiders
process = CrawlerProcess(settings)
process.crawl(spiders.LiquipediaSpider)
# Block until crawling is complete
process.start()
def handle_cmdline_arguments():
# Output to JSON file
# Create new database
# Update database?
# Logging
# Specify specific tournament(s)
# parser = argparse.ArgumentParser(description='Creates an .m3u playlist from given media files.')
# parser.add_argument('-u', '--upload', help='Attempt to upload files to remote.', action='store_true')
# parser.add_argument('-r', '--recursive', help='Process subdirectories as well.', action='store_true')
# parser.add_argument('-n', '--name', type=str, help='Name of playlist.')
# parser.add_argument('files', type=str, nargs='+', help='Absolute paths to files.')
# args = parser.parse_args()
run()
if __name__ == "__main__":
handle_cmdline_arguments()
|
gpl-3.0
|
Botong/Data_Pipeline_Byte1
|
lib/flask/exthook.py
|
783
|
5087
|
# -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
"""This importer redirects imports from this submodule to other locations.
This makes it possible to transition from the old flaskext.name to the
newer flask_name without people having a hard time.
"""
def __init__(self, module_choices, wrapper_module):
self.module_choices = module_choices
self.wrapper_module = wrapper_module
self.prefix = wrapper_module + '.'
self.prefix_cutoff = wrapper_module.count('.') + 1
def __eq__(self, other):
return self.__class__.__module__ == other.__class__.__module__ and \
self.__class__.__name__ == other.__class__.__name__ and \
self.wrapper_module == other.wrapper_module and \
self.module_choices == other.module_choices
def __ne__(self, other):
return not self.__eq__(other)
def install(self):
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
if fullname.startswith(self.prefix):
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
for path in self.module_choices:
realname = path % modname
try:
__import__(realname)
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
# since we only establish the entry in sys.modules at the
                # very end this seems to be redundant, but if recursive imports
                # happen we will call into the same import a second time.
# On the second invocation we still don't have an entry for
# fullname in sys.modules, but we will end up with the same
# fake module name and that import will succeed since this
# one already has a temporary entry in the modules dict.
# Since this one "succeeded" temporarily that second
# invocation now will have created a fullname entry in
# sys.modules which we have to kill.
sys.modules.pop(fullname, None)
# If it's an important traceback we reraise it, otherwise
# we swallow it and try the next choice. The skipped frame
# is the one from __import__ above which we don't care about
if self.is_important_traceback(realname, tb):
reraise(exc_type, exc_value, tb.tb_next)
continue
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in modname:
setattr(sys.modules[self.wrapper_module], modname, module)
return module
raise ImportError('No module named %s' % fullname)
def is_important_traceback(self, important_module, tb):
"""Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed).
"""
while tb is not None:
if self.is_important_frame(important_module, tb):
return True
tb = tb.tb_next
return False
def is_important_frame(self, important_module, tb):
"""Checks a single frame if it's important."""
g = tb.tb_frame.f_globals
if '__name__' not in g:
return False
module_name = g['__name__']
# Python 2.7 Behavior. Modules are cleaned up late so the
# name shows up properly here. Success!
if module_name == important_module:
return True
        # Some python versions will clean up modules so early that the
# module name at that point is no longer set. Try guessing from
# the filename then.
filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
test_string = os.path.sep + important_module.replace('.', os.path.sep)
return test_string + '.py' in filename or \
test_string + os.path.sep + '__init__.py' in filename
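# Minimal usage sketch (mirrors how flask.ext wires this importer up; shown
# here only for illustration):
#
#     importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], 'flask.ext')
#     importer.install()
#     # afterwards `from flask.ext.foo import bar` resolves to flask_foo,
#     # falling back to flaskext.foo if the first import fails.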
|
apache-2.0
|
olexiim/edx-platform
|
lms/djangoapps/django_comment_client/base/tests.py
|
12
|
42022
|
import logging
import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import TEST_DATA_MOCK_MODULESTORE
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedContentTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
# pylint: disable=missing-docstring
class MockRequestSetupMixin(object):
def _create_response_mock(self, data):
return Mock(text=json.dumps(data), json=Mock(return_value=data))
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_response_mock(data)
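# Usage note (illustration only): the tests below call
#     self._set_mock_request_data(mock_request, {"closed": False})
# so the patched requests.request returns an object whose .text is the
# JSON-encoded dict and whose .json() returns the dict itself.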
@patch('lms.lib.comment_client.utils.requests.request')
class CreateThreadGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
self._set_mock_request_data(mock_request, {})
mock_request.return_value.status_code = 200
request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().post("dummy_url", request_data)
request.user = user
request.view_name = "create_thread"
return views.create_thread(
request,
course_id=self.course.id.to_deprecated_string(),
commentable_id=commentable_id
)
def test_group_info_in_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
None
)
self._assert_json_response_contains_group_info(response)
@patch('lms.lib.comment_client.utils.requests.request')
class ThreadActionGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
GroupIdAssertionMixin
):
def call_view(
self,
view_name,
mock_request,
user=None,
post_params=None,
view_args=None
):
self._set_mock_request_data(
mock_request,
{
"user_id": str(self.student.id),
"group_id": self.student_cohort.id,
"closed": False,
"type": "thread"
}
)
mock_request.return_value.status_code = 200
request = RequestFactory().post("dummy_url", post_params or {})
request.user = user or self.student
request.view_name = view_name
return getattr(views, view_name)(
request,
course_id=self.course.id.to_deprecated_string(),
thread_id="dummy",
**(view_args or {})
)
def test_update(self, mock_request):
response = self.call_view(
"update_thread",
mock_request,
post_params={"body": "body", "title": "title"}
)
self._assert_json_response_contains_group_info(response)
def test_delete(self, mock_request):
response = self.call_view("delete_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_vote(self, mock_request):
response = self.call_view(
"vote_for_thread",
mock_request,
view_args={"value": "up"}
)
self._assert_json_response_contains_group_info(response)
response = self.call_view("undo_vote_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_flag(self, mock_request):
response = self.call_view("flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
response = self.call_view("un_flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_pin(self, mock_request):
response = self.call_view(
"pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
response = self.call_view(
"un_pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
def test_openclose(self, mock_request):
response = self.call_view(
"openclose_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(
response,
lambda d: d['content']
)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp(create_user=False)
# create a course
self.course = CourseFactory.create(
org='MITx', course='999',
discussion_topics={"Some Topic": {"id": "some_topic"}},
display_name='Robot Super Course',
)
self.course_id = self.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', self.course_id.to_deprecated_string())
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, password)
self.student.is_active = True
self.student.save()
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
self.client = Client()
assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data={
'thread_type': 'discussion',
'body': u'this is a post',
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': u'MITx/999/Robot_Super_Course',
},
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert_equal(response.status_code, 200)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
response = views.delete_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id=test_comment_id)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
mock_request.return_value.status_code = 200
data = {
"user_id": str(self.student.id),
"closed": False,
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_course_topic(self, mock_request):
self._setup_mock_request(mock_request)
response = self.client.post(
reverse("update_thread", kwargs={"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}),
data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
)
self.assertEqual(response.status_code, 200)
@patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"},
mock_request
)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": self.course_id.to_deprecated_string(), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread_open(self, mock_request):
self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1", "username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread_open(self, mock_request):
self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment_open(self, mock_request):
self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment_open(self, mock_request):
self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
                raise ValueError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
response = views.create_sub_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
sets up a mock response from the comments service for getting post counts for our other_user
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
return views.users(request, course_id=course_id.to_deprecated_string())
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = SlashSeparatedCourseKey.from_deprecated_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
@patch('lms.lib.comment_client.utils.requests.request')
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
|
agpl-3.0
|
sri85/selenium
|
py/selenium/webdriver/common/utils.py
|
63
|
2090
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Utils methods.
"""
import socket
def free_port():
"""
Determines a free port using sockets.
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(('0.0.0.0', 0))
free_socket.listen(5)
port = free_socket.getsockname()[1]
free_socket.close()
return port
def is_connectable(port):
"""
Tries to connect to the server at port to see if it is running.
:Args:
- port: The port to connect.
"""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("127.0.0.1", port))
result = True
except socket.error:
result = False
finally:
socket_.close()
return result
def is_url_connectable(port):
"""
Tries to connect to the HTTP server at /status path
and specified port to see if it responds successfully.
:Args:
- port: The port to connect.
"""
try:
from urllib import request as url_request
except ImportError:
import urllib2 as url_request
try:
res = url_request.urlopen("http://127.0.0.1:%s/status" % port)
if res.getcode() == 200:
return True
else:
return False
    except Exception:
return False
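# Illustrative usage sketch (hypothetical server command; not part of this
# module): pick a free port, start a server on it, then poll until it
# accepts connections.
#
#     port = free_port()
#     # subprocess.Popen(['my-server', '--port', str(port)])  # hypothetical
#     while not is_connectable(port):
#         pass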
|
apache-2.0
|
gsehub/edx-platform
|
openedx/core/djangoapps/geoinfo/tests/test_middleware.py
|
31
|
5269
|
# pylint: disable=no-member
"""
Tests for CountryMiddleware.
"""
from mock import patch
import pygeoip
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase
from django.test.client import RequestFactory
from openedx.core.djangoapps.geoinfo.middleware import CountryMiddleware
from student.tests.factories import UserFactory, AnonymousUserFactory
class CountryMiddlewareTests(TestCase):
"""
Tests of CountryMiddleware.
"""
def setUp(self):
super(CountryMiddlewareTests, self).setUp()
self.country_middleware = CountryMiddleware()
self.session_middleware = SessionMiddleware()
self.authenticated_user = UserFactory.create()
self.anonymous_user = AnonymousUserFactory.create()
self.request_factory = RequestFactory()
self.patcher = patch.object(pygeoip.GeoIP, 'country_code_by_addr', self.mock_country_code_by_addr)
self.patcher.start()
self.addCleanup(self.patcher.stop)
def mock_country_code_by_addr(self, ip_addr):
"""
Gives us a fake set of IPs
"""
ip_dict = {
'117.79.83.1': 'CN',
'117.79.83.100': 'CN',
'4.0.0.0': 'SD',
'2001:da8:20f:1502:edcf:550b:4a9c:207d': 'CN',
}
return ip_dict.get(ip_addr, 'US')
def test_country_code_added(self):
request = self.request_factory.get(
'/somewhere',
HTTP_X_FORWARDED_FOR='117.79.83.1',
)
request.user = self.authenticated_user
self.session_middleware.process_request(request)
# No country code exists before request.
self.assertNotIn('country_code', request.session)
self.assertNotIn('ip_address', request.session)
self.country_middleware.process_request(request)
# Country code added to session.
self.assertEqual('CN', request.session.get('country_code'))
self.assertEqual('117.79.83.1', request.session.get('ip_address'))
def test_ip_address_changed(self):
request = self.request_factory.get(
'/somewhere',
HTTP_X_FORWARDED_FOR='4.0.0.0',
)
request.user = self.anonymous_user
self.session_middleware.process_request(request)
request.session['country_code'] = 'CN'
request.session['ip_address'] = '117.79.83.1'
self.country_middleware.process_request(request)
# Country code is changed.
self.assertEqual('SD', request.session.get('country_code'))
self.assertEqual('4.0.0.0', request.session.get('ip_address'))
def test_ip_address_is_not_changed(self):
request = self.request_factory.get(
'/somewhere',
HTTP_X_FORWARDED_FOR='117.79.83.1',
)
request.user = self.anonymous_user
self.session_middleware.process_request(request)
request.session['country_code'] = 'CN'
request.session['ip_address'] = '117.79.83.1'
self.country_middleware.process_request(request)
# Country code is not changed.
self.assertEqual('CN', request.session.get('country_code'))
self.assertEqual('117.79.83.1', request.session.get('ip_address'))
def test_same_country_different_ip(self):
request = self.request_factory.get(
'/somewhere',
HTTP_X_FORWARDED_FOR='117.79.83.100',
)
request.user = self.anonymous_user
self.session_middleware.process_request(request)
request.session['country_code'] = 'CN'
request.session['ip_address'] = '117.79.83.1'
self.country_middleware.process_request(request)
# Country code is not changed.
self.assertEqual('CN', request.session.get('country_code'))
self.assertEqual('117.79.83.100', request.session.get('ip_address'))
def test_ip_address_is_none(self):
# IP address is not defined in request.
request = self.request_factory.get('/somewhere')
request.user = self.anonymous_user
# Run process_request to set up the session in the request
# to be able to override it.
self.session_middleware.process_request(request)
request.session['country_code'] = 'CN'
request.session['ip_address'] = '117.79.83.1'
self.country_middleware.process_request(request)
# No country code exists after request processing.
self.assertNotIn('country_code', request.session)
self.assertNotIn('ip_address', request.session)
def test_ip_address_is_ipv6(self):
request = self.request_factory.get(
'/somewhere',
HTTP_X_FORWARDED_FOR='2001:da8:20f:1502:edcf:550b:4a9c:207d'
)
request.user = self.authenticated_user
self.session_middleware.process_request(request)
# No country code exists before request.
self.assertNotIn('country_code', request.session)
self.assertNotIn('ip_address', request.session)
self.country_middleware.process_request(request)
# Country code added to session.
self.assertEqual('CN', request.session.get('country_code'))
self.assertEqual(
'2001:da8:20f:1502:edcf:550b:4a9c:207d', request.session.get('ip_address'))
|
agpl-3.0
|
ansible/ansible
|
test/support/integration/plugins/module_utils/aws/s3.py
|
60
|
1584
|
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # Handled by the calling module
HAS_MD5 = True
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
HAS_MD5 = False
def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
if '-' in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split('-')[1])
digests = []
s3_kwargs = dict(
Bucket=bucket,
Key=obj,
)
if version:
s3_kwargs['VersionId'] = version
with open(filename, 'rb') as f:
for part_num in range(1, parts + 1):
s3_kwargs['PartNumber'] = part_num
try:
head = s3.head_object(**s3_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get head object")
digests.append(md5(f.read(int(head['ContentLength']))))
digest_squared = md5(b''.join(m.digest() for m in digests))
return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
else: # Compute the MD5 sum normally
return '"{0}"'.format(module.md5(filename))
|
gpl-3.0
|
wemanuel/smry
|
server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/url_maps/add_host_rule.py
|
2
|
3707
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for adding a host rule to a URL map."""
import copy
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.compute.lib import base_classes
class AddHostRule(base_classes.ReadWriteCommand):
"""Add a rule to a URL map to map hosts to a path matcher."""
@staticmethod
def Args(parser):
parser.add_argument(
'--description',
help='An optional, textual description for the host rule.')
hosts = parser.add_argument(
'--hosts',
type=arg_parsers.ArgList(min_length=1),
action=arg_parsers.FloatingListValuesCatcher(),
metavar='HOST',
required=True,
help='The set of hosts to match requests against.')
hosts.detailed_help = """\
The set of hosts to match requests against. Each host must be
a fully qualified domain name (FQDN) with the exception that
the host can begin with a ``*'' or ``*-''. ``*'' acts as a
glob and will match any string of atoms to the left where an
atom is separated by dots (``.'') or dashes (``-'').
"""
path_matcher = parser.add_argument(
'--path-matcher-name',
required=True,
        help=('The name of the path matcher to use if a request matches this '
'host rule.'))
path_matcher.detailed_help = """\
        The name of the path matcher to use if a request matches this
        host rule. The path matcher must already exist in the URL map
(see 'gcloud compute url-maps add-path-matcher').
"""
parser.add_argument(
'name',
help='The name of the URL map.')
@property
def service(self):
return self.compute.urlMaps
@property
def resource_type(self):
return 'urlMaps'
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
def GetGetRequest(self, args):
"""Returns the request for the existing URL map resource."""
return (self.service,
'Get',
self.messages.ComputeUrlMapsGetRequest(
urlMap=self.ref.Name(),
project=self.project))
def GetSetRequest(self, args, replacement, existing):
return (self.service,
'Update',
self.messages.ComputeUrlMapsUpdateRequest(
urlMap=self.ref.Name(),
urlMapResource=replacement,
project=self.project))
def Modify(self, args, existing):
"""Returns a modified URL map message."""
replacement = copy.deepcopy(existing)
new_host_rule = self.messages.HostRule(
description=args.description,
hosts=sorted(args.hosts),
pathMatcher=args.path_matcher_name)
replacement.hostRules.append(new_host_rule)
return replacement
AddHostRule.detailed_help = {
'brief': 'Add a rule to a URL map to map hosts to a path matcher',
'DESCRIPTION': """\
        *{command}* is used to add a mapping of hosts to a path
matcher in a URL map. The mapping will match the host
component of HTTP requests to path matchers which in turn map
the request to a backend service. Before adding a host rule,
at least one path matcher must exist in the URL map to take
care of the path component of the requests. 'gcloud compute
url-maps add-path-matcher' or 'gcloud compute url-maps edit'
can be used to add path matchers.
""",
'EXAMPLES': """\
To create a host rule mapping the ``*-foo.google.com'' and
``google.com'' hosts to the ``www'' path matcher, run:
$ {command} MY-URL-MAP --hosts *-foo.google.com google.com --path-matcher-name www
""",
}
|
apache-2.0
|
azumimuo/family-xbmc-addon
|
plugin.video.ZemTV-shani/websocket/tests/test_websocket.py
|
57
|
26212
|
# -*- coding: utf-8 -*-
#
import six
import sys
sys.path[0:0] = [""]
import os
import os.path
import base64
import socket
try:
from ssl import SSLError
except ImportError:
# dummy class of SSLError for ssl none-support environment.
class SSLError(Exception):
pass
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
import uuid
if six.PY3:
from base64 import decodebytes as base64decode
else:
from base64 import decodestring as base64decode
# websocket-client
import websocket as ws
from websocket._handshake import _create_sec_websocket_key
from websocket._url import parse_url, get_proxy_info
from websocket._utils import validate_utf8
from websocket._handshake import _validate as _validate_header
from websocket._http import read_headers
# Skip test to access the internet.
TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
# Skip Secure WebSocket test.
TEST_SECURE_WS = True
TRACABLE = False
def create_mask_key(n):
return "abcd"
class SockMock(object):
def __init__(self):
self.data = []
self.sent = []
def add_packet(self, data):
self.data.append(data)
def recv(self, bufsize):
if self.data:
e = self.data.pop(0)
if isinstance(e, Exception):
raise e
if len(e) > bufsize:
self.data.insert(0, e[bufsize:])
return e[:bufsize]
def send(self, data):
self.sent.append(data)
return len(data)
def close(self):
pass
class HeaderSockMock(SockMock):
def __init__(self, fname):
SockMock.__init__(self)
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, "rb") as f:
self.add_packet(f.read())
class WebSocketTest(unittest.TestCase):
def setUp(self):
ws.enableTrace(TRACABLE)
def tearDown(self):
pass
def testDefaultTimeout(self):
self.assertEqual(ws.getdefaulttimeout(), None)
ws.setdefaulttimeout(10)
self.assertEqual(ws.getdefaulttimeout(), 10)
ws.setdefaulttimeout(None)
def testParseUrl(self):
p = parse_url("ws://www.example.com/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/r/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("wss://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://www.example.com:8080/r?key=value")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r?key=value")
self.assertEqual(p[3], True)
self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
return
p = parse_url("ws://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("wss://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 443)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
def testWSKey(self):
key = _create_sec_websocket_key()
self.assertTrue(key != 24)
        self.assertTrue(six.u("¥n") not in key)
def testWsUtils(self):
key = "c6b8hTg4EeGb2gQMztV1/g=="
required_header = {
"upgrade": "websocket",
"connection": "upgrade",
"sec-websocket-accept": "Kxep+hNu9n51529fGidYu7a3wO0=",
}
self.assertEqual(_validate_header(required_header, key, None), (True, None))
header = required_header.copy()
header["upgrade"] = "http"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["upgrade"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["connection"] = "something"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["connection"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["sec-websocket-accept"] = "something"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["sec-websocket-accept"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["sec-websocket-protocol"] = "sub1"
self.assertEqual(_validate_header(header, key, ["sub1", "sub2"]), (True, "sub1"))
self.assertEqual(_validate_header(header, key, ["sub2", "sub3"]), (False, None))
header = required_header.copy()
header["sec-websocket-protocol"] = "sUb1"
self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (True, "sub1"))
def testReadHeader(self):
status, header = read_headers(HeaderSockMock("data/header01.txt"))
self.assertEqual(status, 101)
self.assertEqual(header["connection"], "upgrade")
HeaderSockMock("data/header02.txt")
self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
def testSend(self):
# TODO: add longer frame data
sock = ws.WebSocket()
sock.set_mask_key(create_mask_key)
s = sock.sock = HeaderSockMock("data/header01.txt")
sock.send("Hello")
self.assertEqual(s.sent[0], six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
sock.send("γγγ«γ‘γ―")
self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
sock.send(u"γγγ«γ‘γ―")
self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
sock.send("x" * 127)
def testRecv(self):
# TODO: add longer frame data
sock = ws.WebSocket()
s = sock.sock = SockMock()
something = six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc")
s.add_packet(something)
data = sock.recv()
        self.assertEqual(data, "こんにちは")
s.add_packet(six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
data = sock.recv()
self.assertEqual(data, "Hello")
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testIter(self):
count = 2
for rsvp in ws.create_connection('ws://stream.meetup.com/2/rsvps'):
count -= 1
if count == 0:
break
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testNext(self):
sock = ws.create_connection('ws://stream.meetup.com/2/rsvps')
self.assertEqual(str, type(next(sock)))
def testInternalRecvStrict(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
s.add_packet(six.b("foo"))
s.add_packet(socket.timeout())
s.add_packet(six.b("bar"))
# s.add_packet(SSLError("The read operation timed out"))
s.add_packet(six.b("baz"))
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.frame_buffer.recv_strict(9)
# if six.PY2:
# with self.assertRaises(ws.WebSocketTimeoutException):
# data = sock._recv_strict(9)
# else:
# with self.assertRaises(SSLError):
# data = sock._recv_strict(9)
data = sock.frame_buffer.recv_strict(9)
self.assertEqual(data, six.b("foobarbaz"))
with self.assertRaises(ws.WebSocketConnectionClosedException):
data = sock.frame_buffer.recv_strict(1)
def testRecvTimeout(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
s.add_packet(six.b("\x81"))
s.add_packet(socket.timeout())
s.add_packet(six.b("\x8dabcd\x29\x07\x0f\x08\x0e"))
s.add_packet(socket.timeout())
s.add_packet(six.b("\x4e\x43\x33\x0e\x10\x0f\x00\x40"))
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.recv()
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.recv()
data = sock.recv()
self.assertEqual(data, "Hello, World!")
with self.assertRaises(ws.WebSocketConnectionClosedException):
data = sock.recv()
def testRecvWithSimpleFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
data = sock.recv()
self.assertEqual(data, "Brevity is the soul of wit")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testRecvWithFireEventOfFragmentation(self):
sock = ws.WebSocket(fire_cont_frame=True)
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x00\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
_, data = sock.recv_data()
self.assertEqual(data, six.b("Brevity is "))
_, data = sock.recv_data()
self.assertEqual(data, six.b("Brevity is "))
_, data = sock.recv_data()
self.assertEqual(data, six.b("the soul of wit"))
# OPCODE=CONT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x80\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
with self.assertRaises(ws.WebSocketException):
sock.recv_data()
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testClose(self):
sock = ws.WebSocket()
sock.sock = SockMock()
sock.connected = True
sock.close()
self.assertEqual(sock.connected, False)
sock = ws.WebSocket()
s = sock.sock = SockMock()
sock.connected = True
s.add_packet(six.b('\x88\x80\x17\x98p\x84'))
sock.recv()
self.assertEqual(sock.connected, False)
def testRecvContFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
self.assertRaises(ws.WebSocketException, sock.recv)
def testRecvWithProlongedFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Once more unto the breach, "
s.add_packet(six.b("\x01\x9babcd.\x0c\x00\x01A\x0f\x0c\x16\x04B\x16\n\x15" \
"\rC\x10\t\x07C\x06\x13\x07\x02\x07\tNC"))
# OPCODE=CONT, FIN=0, MSG="dear friends, "
s.add_packet(six.b("\x00\x8eabcd\x05\x07\x02\x16A\x04\x11\r\x04\x0c\x07" \
"\x17MB"))
# OPCODE=CONT, FIN=1, MSG="once more"
s.add_packet(six.b("\x80\x89abcd\x0e\x0c\x00\x01A\x0f\x0c\x16\x04"))
data = sock.recv()
self.assertEqual(
data,
"Once more unto the breach, dear friends, once more")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testRecvWithFragmentationAndControlFrame(self):
sock = ws.WebSocket()
sock.set_mask_key(create_mask_key)
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Too much "
s.add_packet(six.b("\x01\x89abcd5\r\x0cD\x0c\x17\x00\x0cA"))
# OPCODE=PING, FIN=1, MSG="Please PONG this"
s.add_packet(six.b("\x89\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
# OPCODE=CONT, FIN=1, MSG="of a good thing"
s.add_packet(six.b("\x80\x8fabcd\x0e\x04C\x05A\x05\x0c\x0b\x05B\x17\x0c" \
"\x08\x0c\x04"))
data = sock.recv()
self.assertEqual(data, "Too much of a good thing")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
self.assertEqual(
s.sent[0],
six.b("\x8a\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testWebSocket(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.send("Hello, World")
result = s.recv()
self.assertEqual(result, "Hello, World")
s.send(u"γγ«γγ«γγ‘γ―γδΈη")
result = s.recv()
        self.assertEqual(result, "こにゃにゃちは、世界")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testPingPong(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.ping("Hello")
s.pong("Hi")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
@unittest.skipUnless(TEST_SECURE_WS, "wss://echo.websocket.org doesn't work well.")
def testSecureWebSocket(self):
if 1:
import ssl
s = ws.create_connection("wss://echo.websocket.org/")
self.assertNotEqual(s, None)
self.assertTrue(isinstance(s.sock, ssl.SSLSocket))
s.send("Hello, World")
result = s.recv()
self.assertEqual(result, "Hello, World")
s.send(u"γγ«γγ«γγ‘γ―γδΈη")
result = s.recv()
            self.assertEqual(result, "こにゃにゃちは、世界")
s.close()
#except:
# pass
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testWebSocketWithCustomHeader(self):
s = ws.create_connection("ws://echo.websocket.org/",
headers={"User-Agent": "PythonWebsocketClient"})
self.assertNotEqual(s, None)
s.send("Hello, World")
result = s.recv()
self.assertEqual(result, "Hello, World")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testAfterClose(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.close()
self.assertRaises(ws.WebSocketConnectionClosedException, s.send, "Hello")
self.assertRaises(ws.WebSocketConnectionClosedException, s.recv)
def testNonce(self):
""" WebSocket key should be a random 16-byte nonce.
"""
key = _create_sec_websocket_key()
nonce = base64decode(key.encode("utf-8"))
self.assertEqual(16, len(nonce))
class WebSocketAppTest(unittest.TestCase):
class NotSetYet(object):
""" A marker class for signalling that a value hasn't been set yet.
"""
def setUp(self):
ws.enableTrace(TRACABLE)
WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
def tearDown(self):
WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testKeepRunning(self):
""" A WebSocketApp should keep running as long as its self.keep_running
is not False (in the boolean context).
"""
def on_open(self, *args, **kwargs):
""" Set the keep_running flag for later inspection and immediately
close the connection.
"""
WebSocketAppTest.keep_running_open = self.keep_running
self.close()
def on_close(self, *args, **kwargs):
""" Set the keep_running flag for the test to use.
"""
WebSocketAppTest.keep_running_close = self.keep_running
app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, on_close=on_close)
app.run_forever()
self.assertFalse(isinstance(WebSocketAppTest.keep_running_open,
WebSocketAppTest.NotSetYet))
self.assertFalse(isinstance(WebSocketAppTest.keep_running_close,
WebSocketAppTest.NotSetYet))
self.assertEqual(True, WebSocketAppTest.keep_running_open)
self.assertEqual(False, WebSocketAppTest.keep_running_close)
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testSockMaskKey(self):
""" A WebSocketApp should forward the received mask_key function down
to the actual socket.
"""
def my_mask_key_func():
pass
def on_open(self, *args, **kwargs):
""" Set the value so the test can use it later on and immediately
close the connection.
"""
WebSocketAppTest.get_mask_key_id = id(self.get_mask_key)
self.close()
app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, get_mask_key=my_mask_key_func)
app.run_forever()
# Note: We can't use 'is' for comparing the functions directly, need to use 'id'.
self.assertEqual(WebSocketAppTest.get_mask_key_id, id(my_mask_key_func))
class SockOptTest(unittest.TestCase):
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testSockOpt(self):
sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
s = ws.create_connection("ws://echo.websocket.org", sockopt=sockopt)
self.assertNotEqual(s.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
s.close()
class UtilsTest(unittest.TestCase):
def testUtf8Validator(self):
state = validate_utf8(six.b('\xf0\x90\x80\x80'))
self.assertEqual(state, True)
state = validate_utf8(six.b('\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5\xed\xa0\x80edited'))
self.assertEqual(state, False)
state = validate_utf8(six.b(''))
self.assertEqual(state, True)
class ProxyInfoTest(unittest.TestCase):
def setUp(self):
self.http_proxy = os.environ.get("http_proxy", None)
self.https_proxy = os.environ.get("https_proxy", None)
if "http_proxy" in os.environ:
del os.environ["http_proxy"]
if "https_proxy" in os.environ:
del os.environ["https_proxy"]
def tearDown(self):
if self.http_proxy:
os.environ["http_proxy"] = self.http_proxy
elif "http_proxy" in os.environ:
del os.environ["http_proxy"]
if self.https_proxy:
os.environ["https_proxy"] = self.https_proxy
elif "https_proxy" in os.environ:
del os.environ["https_proxy"]
def testProxyFromArgs(self):
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128), ("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128), ("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, no_proxy=["example.com"], proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, no_proxy=["echo.websocket.org"], proxy_auth=("a", "b")),
(None, 0, None))
def testProxyFromEnv(self):
os.environ["http_proxy"] = "http://localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, None))
os.environ["http_proxy"] = "http://a:b@localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
os.environ["no_proxy"] = "example1.com,example2.com"
self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
guludo/ardupilot-1
|
Tools/scripts/magfit_flashlog.py
|
278
|
4744
|
#!/usr/bin/env python
''' fit best estimate of magnetometer offsets from ArduCopter flashlog
using the algorithm from Bill Premerlani
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_flashlog.py [options]")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_flashlog.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
    '''find mag offsets by applying Bill's "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
# equation 6 of Bills paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print ofs
return ofs
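def _example_find_offsets():
    '''Illustrative sketch (added for clarity; not part of the original tool).
    Build synthetic mag readings on a sphere around a known centre and check
    that find_offsets() walks the estimate toward the negated centre. Relies
    on the same command line opts (gain, history, repeat, ...) as a real run.'''
    from random import gauss
    true_centre = Vector3(30.0, -12.0, 5.0)
    samples = []
    for _ in range(500):
        v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
        v.normalize()
        samples.append(true_centre + v * 400.0)
    ofs = Vector3(0, 0, 0)
    for _ in range(opts.repeat):
        ofs = find_offsets(samples, ofs)
    # with default options ofs should end up near Vector3(-30, 12, -5)
    print 'estimated offsets:', ofs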
def plot_corrected_field(filename, data, offsets):
f = open(filename, mode='w')
for d in data:
corrected = d + offsets
f.write("%.1f\n" % corrected.length())
f.close()
def magfit(logfile):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
    flog = open(logfile, mode='r')
data = []
data_no_motors = []
mag = None
offsets = None
# now gather all the data
for line in flog:
if not line.startswith('COMPASS,'):
continue
line = line.rstrip()
line = line.replace(' ', '')
a = line.split(',')
ofs = Vector3(float(a[4]), float(a[5]), float(a[6]))
if offsets is None:
initial_offsets = ofs
offsets = ofs
motor_ofs = Vector3(float(a[7]), float(a[8]), float(a[9]))
mag = Vector3(float(a[1]), float(a[2]), float(a[3]))
mag = mag - offsets
data.append(mag)
data_no_motors.append(mag - motor_ofs)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % initial_offsets)
# run the fitting algorithm
ofs = initial_offsets
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
plot_corrected_field('plot.dat', data, ofs)
plot_corrected_field('initial.dat', data, initial_offsets)
plot_corrected_field('zero.dat', data, Vector3(0,0,0))
plot_corrected_field('hand.dat', data, Vector3(-25,-8,-2))
plot_corrected_field('zero-no-motors.dat', data_no_motors, Vector3(0,0,0))
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
|
gpl-3.0
|
uwescience/pulse2percept
|
pulse2percept/implants/bvt.py
|
1
|
4694
|
"""`BVT24`"""
import numpy as np
from .base import ProsthesisSystem
from .electrodes import DiskElectrode
from .electrode_arrays import ElectrodeArray
class BVT24(ProsthesisSystem):
"""24-channel suprachoroidal retinal prosthesis
This class creates a 24-channel suprachoroidal retinal prosthesis
[Layton2014]_, which was developed by the Bionic Vision Australia
Consortium and commercialized by Bionic Vision Technologies (BVT).
The center of the array is located at (x,y,z), given in microns, and the
array is rotated by rotation angle ``rot``, given in radians.
The array consists of:
- 33 platinum stimulating electrodes:
- 30 electrodes with 600um diameter (Electrodes 1-20 (except
9, 17, 19) and Electrodes 21a-m),
- 3 electrodes with 400um diameter (Electrodes 9, 17, 19)
- 2 return electrodes with 2000um diameter (Electrodes 22, 23)
    Electrodes 21a-m are typically ganged to provide an external
ring for common ground. The center of the array is assumed to lie
between Electrodes 7, 8, 9, and 13.
.. note::
Column order for electrode numbering is reversed in a left-eye
implant.
.. versionadded:: 0.6
Parameters
----------
x : float
x coordinate of the array center (um)
y : float
y coordinate of the array center (um)
z: float or array_like
Distance of the array to the retinal surface (um). Either a list
with 60 entries or a scalar.
rot : float
Rotation angle of the array (rad). Positive values denote
counter-clock-wise (CCW) rotations in the retinal coordinate
system.
eye : {'RE', 'LE'}, optional
Eye in which array is implanted.
"""
# Frozen class: User cannot add more class attributes
__slots__ = ()
def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
self.eye = eye
self.earray = ElectrodeArray([])
n_elecs = 35
# the positions of the electrodes 1-20, 21a-21m, R1-R2
x_arr = [-1275.0, -850.0, -1275.0, -850.0, -1275.0,
-425.0, 0, -425.0, 0, -425.0,
425.0, 850.0, 425.0, 850.0, 425.0,
1275.0, 1700.0, 1275.0, 1700.0, 1275.0,
-850.0, 0, 850.0, 1700.0, 2125.0,
2550.0, 2125.0, 2550.0, 2125.0, 1700.0,
850.0, 0, -850.0, 7000.0, 9370.0]
y_arr = [1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
2280.0, 2280.0, 2280.0, 2280.0, 1520.0,
760.0, 0.0, -760.0, -1520.0, -2280.0,
-2280.0, -2280.0, -2280.0, 0, 0]
if isinstance(z, (list, np.ndarray)):
# Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
if z_arr.size != n_elecs:
raise ValueError("If `z` is a list, it must have %d entries, "
"not %d." % (n_elecs, len(z)))
else:
# If `z` is a scalar, choose same height for all electrodes:
z_arr = np.ones(n_elecs, dtype=float) * z
# the position of the electrodes 1-20, 21a-21m, R1-R2 for left eye
if eye == 'LE':
x_arr = np.negative(x_arr)
# the radius of all the electrodes in the implants
r_arr = [300.0] * n_elecs
# the radius of electrodes 9, 17, 19 is 200.0 um
r_arr[8] = r_arr[16] = r_arr[18] = 200.0
# the radius of the return electrodes is 1000.0 um
r_arr[33] = r_arr[34] = 1000.0
# the names of the electrodes 1-20, 21a-21m, R1 and R2
names = [str(name) for name in range(1, 21)]
names.extend(['21a', '21b', '21c', '21d', '21e',
'21f', '21g', '21h', '21i', '21j',
'21k', '21l', '21m'])
names.extend(['R1', 'R2'])
# Rotate the grid:
rotmat = np.array([np.cos(rot), -np.sin(rot),
np.sin(rot), np.cos(rot)]).reshape((2, 2))
xy = np.matmul(rotmat, np.vstack((x_arr, y_arr)))
x_arr = xy[0, :]
y_arr = xy[1, :]
# Apply offset to make the grid centered at (x, y):
x_arr += x
y_arr += y
for x, y, z, r, name in zip(x_arr, y_arr, z_arr, r_arr, names):
self.earray.add_electrode(name, DiskElectrode(x, y, z, r))
# Beware of race condition: Stim must be set last, because it requires
# indexing into self.electrodes:
self.stim = stim
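# Minimal usage sketch (illustrative addition, not part of the original module):
# place a right-eye implant offset from the fovea by (x, y) = (-1000, 500) microns
# and rotated by 10 degrees. The name-based electrode lookup on ``earray`` is an
# assumption about the base ElectrodeArray API.
if __name__ == '__main__':
    implant = BVT24(x=-1000, y=500, rot=np.deg2rad(10), eye='RE')
    print(implant.earray)        # 35 disk electrodes named '1'-'20', '21a'-'21m', 'R1', 'R2'
    print(implant.earray['1'])   # assumed name-based lookup of one stimulating electrode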
|
bsd-3-clause
|
jemekite/Dougpool
|
p2pool/util/deferral.py
|
233
|
8790
|
from __future__ import division
import itertools
import random
import sys
from twisted.internet import defer, reactor
from twisted.python import failure, log
def sleep(t):
d = defer.Deferred(canceller=lambda d_: dc.cancel())
dc = reactor.callLater(t, d.callback, None)
return d
def run_repeatedly(f, *args, **kwargs):
current_dc = [None]
def step():
delay = f(*args, **kwargs)
current_dc[0] = reactor.callLater(delay, step)
step()
def stop():
current_dc[0].cancel()
return stop
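def _example_run_repeatedly():
    '''Illustrative only (added for clarity; not part of the original module).
    The callable handed to run_repeatedly() does one unit of work and returns
    the delay in seconds until it should run again.'''
    def tick():
        print 'tick'
        return 30  # schedule the next call 30 seconds from now
    stop = run_repeatedly(tick)
    return stop  # call stop() later to cancel the pending call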
class RetrySilentlyException(Exception):
pass
def retry(message='Error:', delay=3, max_retries=None, traceback=True):
'''
@retry('Error getting block:', 1)
@defer.inlineCallbacks
def get_block(hash):
...
'''
def retry2(func):
@defer.inlineCallbacks
def f(*args, **kwargs):
for i in itertools.count():
try:
result = yield func(*args, **kwargs)
except Exception, e:
if i == max_retries:
raise
if not isinstance(e, RetrySilentlyException):
if traceback:
log.err(None, message)
else:
print >>sys.stderr, message, e
yield sleep(delay)
else:
defer.returnValue(result)
return f
return retry2
class ReplyMatcher(object):
'''
Converts request/got response interface to deferred interface
'''
def __init__(self, func, timeout=5):
self.func = func
self.timeout = timeout
self.map = {}
def __call__(self, id):
if id not in self.map:
self.func(id)
df = defer.Deferred()
def timeout():
self.map[id].remove((df, timer))
if not self.map[id]:
del self.map[id]
df.errback(failure.Failure(defer.TimeoutError('in ReplyMatcher')))
timer = reactor.callLater(self.timeout, timeout)
self.map.setdefault(id, set()).add((df, timer))
return df
def got_response(self, id, resp):
if id not in self.map:
return
for df, timer in self.map.pop(id):
df.callback(resp)
timer.cancel()
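def _example_reply_matcher(send_ping):
    '''Illustrative only (added for clarity; not part of the original module).
    send_ping is a hypothetical fire-and-forget function taking a request id;
    callers get a Deferred that fires once got_response() sees the same id.'''
    matcher = ReplyMatcher(send_ping, timeout=5)
    df = matcher('req-1')                  # sends the request, returns a Deferred
    matcher.got_response('req-1', 'pong')  # fires df with 'pong' and cancels the timeout
    return df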
class GenericDeferrer(object):
'''
Converts query with identifier/got response interface to deferred interface
'''
def __init__(self, max_id, func, timeout=5, on_timeout=lambda: None):
self.max_id = max_id
self.func = func
self.timeout = timeout
self.on_timeout = on_timeout
self.map = {}
def __call__(self, *args, **kwargs):
while True:
id = random.randrange(self.max_id)
if id not in self.map:
break
def cancel(df):
df, timer = self.map.pop(id)
timer.cancel()
try:
df = defer.Deferred(cancel)
except TypeError:
df = defer.Deferred() # handle older versions of Twisted
def timeout():
self.map.pop(id)
df.errback(failure.Failure(defer.TimeoutError('in GenericDeferrer')))
self.on_timeout()
timer = reactor.callLater(self.timeout, timeout)
self.map[id] = df, timer
self.func(id, *args, **kwargs)
return df
def got_response(self, id, resp):
if id not in self.map:
return
df, timer = self.map.pop(id)
timer.cancel()
df.callback(resp)
def respond_all(self, resp):
while self.map:
id, (df, timer) = self.map.popitem()
timer.cancel()
df.errback(resp)
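def _example_generic_deferrer(send_query):
    '''Illustrative only (added for clarity; not part of the original module).
    send_query(id, *args) is a hypothetical function that transmits a query
    tagged with the randomly chosen id; the transport later calls
    got_response(id, result) to fire the Deferred, or the timeout errbacks it.'''
    deferrer = GenericDeferrer(2**32, send_query, timeout=5)
    df = deferrer('getblock', 123)  # picks an unused id, calls send_query(id, 'getblock', 123)
    return deferrer, df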
class NotNowError(Exception):
pass
class DeferredCacher(object):
'''
like memoize, but for functions that return Deferreds
@DeferredCacher
def f(x):
...
return df
@DeferredCacher.with_backing(bsddb.hashopen(...))
def f(x):
...
return df
'''
@classmethod
def with_backing(cls, backing):
return lambda func: cls(func, backing)
def __init__(self, func, backing=None):
if backing is None:
backing = {}
self.func = func
self.backing = backing
self.waiting = {}
@defer.inlineCallbacks
def __call__(self, key):
if key in self.waiting:
yield self.waiting[key]
if key in self.backing:
defer.returnValue(self.backing[key])
else:
self.waiting[key] = defer.Deferred()
try:
value = yield self.func(key)
finally:
self.waiting.pop(key).callback(None)
self.backing[key] = value
defer.returnValue(value)
_nothing = object()
def call_now(self, key, default=_nothing):
if key in self.backing:
return self.backing[key]
if key not in self.waiting:
self.waiting[key] = defer.Deferred()
def cb(value):
self.backing[key] = value
self.waiting.pop(key).callback(None)
def eb(fail):
self.waiting.pop(key).callback(None)
if fail.check(RetrySilentlyException):
return
print
print 'Error when requesting noncached value:'
fail.printTraceback()
print
self.func(key).addCallback(cb).addErrback(eb)
if default is not self._nothing:
return default
raise NotNowError(key)
def deferred_has_been_called(df):
still_running = True
res2 = []
def cb(res):
if still_running:
res2[:] = [res]
else:
return res
df.addBoth(cb)
still_running = False
if res2:
return True, res2[0]
return False, None
def inlineCallbacks(f):
from functools import wraps
@wraps(f)
def _(*args, **kwargs):
gen = f(*args, **kwargs)
stop_running = [False]
def cancelled(df_):
assert df_ is df
stop_running[0] = True
if currently_waiting_on:
currently_waiting_on[0].cancel()
df = defer.Deferred(cancelled)
currently_waiting_on = []
def it(cur):
while True:
try:
if isinstance(cur, failure.Failure):
res = cur.throwExceptionIntoGenerator(gen) # external code is run here
else:
res = gen.send(cur) # external code is run here
if stop_running[0]:
return
except StopIteration:
df.callback(None)
except defer._DefGen_Return as e:
# XXX should make sure direct child threw
df.callback(e.value)
except:
df.errback()
else:
if isinstance(res, defer.Deferred):
called, res2 = deferred_has_been_called(res)
if called:
cur = res2
continue
else:
currently_waiting_on[:] = [res]
def gotResult(res2):
assert currently_waiting_on[0] is res
currently_waiting_on[:] = []
if stop_running[0]:
return
it(res2)
res.addBoth(gotResult) # external code is run between this and gotResult
else:
cur = res
continue
break
it(None)
return df
return _
class RobustLoopingCall(object):
def __init__(self, func, *args, **kwargs):
self.func, self.args, self.kwargs = func, args, kwargs
self.running = False
def start(self, period):
assert not self.running
self.running = True
self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
@inlineCallbacks
def _worker(self, period):
assert self.running
while self.running:
try:
self.func(*self.args, **self.kwargs)
except:
log.err()
yield sleep(period)
def stop(self):
assert self.running
self.running = False
self._df.cancel()
return self._df
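def _example_robust_looping_call(poll):
    '''Illustrative only (added for clarity; not part of the original module).
    poll is any callable; unlike twisted's LoopingCall, an exception it raises
    is logged via log.err() and the loop keeps running.'''
    looper = RobustLoopingCall(poll)
    looper.start(30)  # call poll() roughly every 30 seconds
    return looper     # later, looper.stop() cancels the loop and returns its Deferred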
|
gpl-3.0
|
wanderknight/tushare
|
tushare/__init__.py
|
1
|
2582
|
__version__ = '0.4.1'
__author__ = 'Jimmy Liu'
"""
for trading data
"""
from tushare.stock.trading import (get_hist_data, get_tick_data,
get_today_all, get_realtime_quotes,
get_h_data, get_today_ticks,
get_index, get_hists,
get_sina_dd)
"""
for fundamental data
"""
from tushare.stock.fundamental import (get_stock_basics, get_report_data,
get_profit_data,
get_operation_data, get_growth_data,
get_debtpaying_data, get_cashflow_data)
"""
for macro data
"""
from tushare.stock.macro import (get_gdp_year, get_gdp_quarter,
get_gdp_for, get_gdp_pull,
get_gdp_contrib, get_cpi,
get_ppi, get_deposit_rate,
get_loan_rate, get_rrr,
get_money_supply, get_money_supply_bal)
"""
for classifying data
"""
from tushare.stock.classifying import (get_industry_classified, get_concept_classified,
get_area_classified, get_gem_classified,
get_sme_classified, get_st_classified,
get_hs300s, get_sz50s, get_zz500s,
get_terminated, get_suspended)
"""
for news and event data
"""
from tushare.stock.newsevent import (get_latest_news, latest_content,
get_notices, notice_content,
guba_sina)
"""
for reference
"""
from tushare.stock.reference import (profit_data, forecast_data,
xsg_data, fund_holdings,
new_stocks, sh_margins,
sh_margin_details,
sz_margins, sz_margin_details)
"""
for shibor
"""
from tushare.stock.shibor import (shibor_data, shibor_quote_data,
shibor_ma_data, lpr_data,
lpr_ma_data)
"""
for LHB
"""
from tushare.stock.billboard import (top_list, cap_tops, broker_tops,
inst_tops, inst_detail)
"""
for DataYes Token
"""
from tushare.util.upass import (set_token, get_token)
from tushare.datayes.api import *
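"""
typical usage (illustrative addition; only functions imported above are shown)
    import tushare as ts
    df = ts.get_hist_data('600848')         # daily bars for one ticker
    rt = ts.get_realtime_quotes('000581')   # realtime quote snapshot
    basics = ts.get_stock_basics()          # fundamentals of all listed stocks
"""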
|
bsd-3-clause
|
htzy/bigfour
|
common/djangoapps/track/tests/test_shim.py
|
111
|
4737
|
"""Ensure emitted events contain the fields legacy processors expect to find."""
from mock import sentinel
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_events_equal
from track.tests import EventTrackingTestCase, FROZEN_TIME
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields legacy processors expect to find."""
def test_event_field_mapping(self):
data = {sentinel.key: sentinel.value}
context = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'username': sentinel.username,
'session': sentinel.session,
'ip': sentinel.ip,
'host': sentinel.host,
'agent': sentinel.agent,
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'event_type': sentinel.name,
'name': sentinel.name,
'context': {
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'path': sentinel.path,
},
'event': data,
'username': sentinel.username,
'event_source': 'server',
'time': FROZEN_TIME,
'agent': sentinel.agent,
'host': sentinel.host,
'ip': sentinel.ip,
'page': None,
'session': sentinel.session,
}
assert_events_equal(expected_event, emitted_event)
def test_missing_fields(self):
self.tracker.emit(sentinel.name)
emitted_event = self.get_event()
expected_event = {
'accept_language': '',
'referer': '',
'event_type': sentinel.name,
'name': sentinel.name,
'context': {},
'event': {},
'username': '',
'event_source': 'server',
'time': FROZEN_TIME,
'agent': '',
'host': '',
'ip': '',
'page': None,
'session': '',
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields necessary for Google Analytics."""
def test_event_fields(self):
""" Test that course_id is added as the label if present, and nonInteraction is set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
def test_no_course_id(self):
""" Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
|
agpl-3.0
|
belemizz/mimic2_tools
|
clinical_db/classify_patients.py
|
1
|
3203
|
"""
classify patients based on lab tests
"""
import get_sample.mimic2
from mutil import Graph
import mutil.mycsv
import time
import datetime
import random
import numpy as np
import theano
import theano.tensor as T
import alg.classification
def main( max_id = 2000, target_codes = ['428.0'], show_flag = True):
mimic2db = get_sample.mimic2.Mimic2()
graph = Graph()
## Get Subject ID ##
id_list = mimic2db.subject_with_icd9_codes(target_codes)
subject_ids = [item for item in id_list if item < max_id]
print "Number of Candidates : %d"%len(subject_ids)
## Get Data ##
days_before_discharge = [0]
recover_values = [[], [], [], []]
expire_values = [[], [], [], []]
start_time = time.clock()
algo_num = 0
time_diff = 4
cr_id = 50090
bun_id = 50177
for str_id in subject_ids:
sid = int(str_id)
print sid
patient = mimic2db.get_subject(sid)
if patient:
final_adm = patient.get_final_admission()
if len(final_adm.icd9)>0 and final_adm.icd9[0][3] == target_codes[0]:
for index, dbd in enumerate(days_before_discharge):
if algo_num == 0:
# bun_and_creatinine
time_of_interest = final_adm.disch_dt + datetime.timedelta(1-dbd)
lab_result = final_adm.get_newest_lab_at_time(time_of_interest)
value1 = [item[4] for item in lab_result if item[0] == cr_id]
value2 = [item[4] for item in lab_result if item[0] == bun_id]
else:
# trend of BUN
time_of_interest1 = final_adm.disch_dt + datetime.timedelta(1-dbd)
time_of_interest2 = final_adm.disch_dt + datetime.timedelta(1-dbd-time_diff)
lab_result1 = final_adm.get_newest_lab_at_time(time_of_interest1)
lab_result2 = final_adm.get_newest_lab_at_time(time_of_interest2)
value1 = [item[4] for item in lab_result1 if item[0] == bun_id]
value2 = [item[4] for item in lab_result2 if item[0] == bun_id]
if patient.hospital_expire_flg == 'Y':
expire_values[index].append([value1, value2])
else:
recover_values[index].append([value1, value2])
end_time = time.clock()
print "data_retrieving_time: %f sec"%(end_time - start_time)
def transform_values(input_values):
""" transform to numpy format """
temp = []
for item in input_values:
if len(item[0])>0 and len(item[1])>0:
temp.append([float(item[0][0]), float(item[1][0])])
return np.array(temp)
positive_x = transform_values(expire_values[0])
negative_x = transform_values(recover_values[0])
data = [[item, 1] for item in positive_x]
data.extend([[item, 0] for item in negative_x])
random.shuffle(data)
x = np.array([item[0] for item in data])
y = np.array([item[1] for item in data])
if __name__ == '__main__':
main()
|
mit
|
Jokeren/neon
|
tests/serialization/cifar10_conv.py
|
2
|
3454
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Small CIFAR10 based convolutional neural network adapted from neon examples
for use in serialization testing.
"""
import numpy as np
from neon.data import ArrayIterator, load_cifar10
from neon.initializers import Uniform
from neon.layers import Affine, Conv, Pooling, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Misclassification, Rectlin, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
from neon import logger as neon_logger
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# hyperparameters
if args.datatype in [np.float16]:
cost_scale = 10.
num_epochs = args.epochs
(X_train, y_train), (X_test, y_test), nclass = load_cifar10(path=args.data_dir)
Nmax = X_train.shape[0] // args.batch_size
Nmax *= args.batch_size
X_train = X_train[0:Nmax, :]
y_train = y_train[0:Nmax, :]
train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32), name='train')
test = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32), name='test')
init_uni = Uniform(low=-0.1, high=0.1)
if args.datatype in [np.float32, np.float64]:
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
momentum_coef=0.9,
stochastic_round=args.rounding)
elif args.datatype in [np.float16]:
opt_gdm = GradientDescentMomentum(learning_rate=0.01 / cost_scale,
momentum_coef=0.9,
stochastic_round=args.rounding)
layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
Pooling((2, 2)),
Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
Pooling((2, 2)),
Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
Affine(nout=10, init=init_uni, activation=Softmax())]
if args.datatype in [np.float32, np.float64]:
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
elif args.datatype in [np.float16]:
cost = GeneralizedCost(costfunc=CrossEntropyMulti(scale=cost_scale))
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, eval_set=test, **args.callback_args)
# callbacks = Callbacks.load_callbacks(callbacks.get_description(), model, data=[train, test])
model.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
error_rate = model.eval(test, metric=Misclassification())
neon_logger.display('Misclassification error = %.1f%%' % (error_rate * 100))
|
apache-2.0
|
Erotemic/utool
|
utool/util_logging.py
|
1
|
20540
|
# -*- coding: utf-8 -*-
"""
If logging is on, utool will overwrite the print function with a logging function
This is a special module which will not get injected into (should it be internal?)
References:
# maybe we can do something like this Queue to try fixing error when
# when using injected print statments with Qt signals and slots
http://stackoverflow.com/questions/21071448/redirecting-stdout-and-stderr-to-a-pyqt4-qtextedit-from-a-secondary-thread
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from six.moves import builtins, map, zip, range # NOQA
from os.path import exists, join, realpath
import logging
import logging.config
import multiprocessing
import os
import sys
from utool._internal import meta_util_arg, meta_util_six
VERBOSE = meta_util_arg.VERBOSE
VERYVERBOSE = meta_util_arg.VERYVERBOSE
PRINT_ALL_CALLERS = meta_util_arg.PRINT_ALL_CALLERS
LOGGING_VERBOSE = meta_util_arg.LOGGING_VERBOSE # --verb-logging
PRINT_INJECT_ORDER = meta_util_arg.PRINT_INJECT_ORDER
def __inside_doctest(original_stdout=sys.stdout):
return original_stdout != sys.stdout
__IN_MAIN_PROCESS__ = multiprocessing.current_process().name == 'MainProcess'
__UTOOL_ROOT_LOGGER__ = None
__CURRENT_LOG_FPATH__ = None
# Remember original python values
# __PYTHON_STDOUT__ = sys.stdout
# __PYTHON_PRINT__ = builtins.print
# __PYTHON_WRITE__ = __PYTHON_STDOUT__.write
# __PYTHON_FLUSH__ = __PYTHON_STDOUT__.flush
# Initialize utool values
__UTOOL_STDOUT__ = None
__UTOOL_PRINT__ = None
# TODO: Allow write and flush to have a logging equivalent
__UTOOL_WRITE__ = None
__UTOOL_FLUSH__ = None
__UTOOL_WRITE_BUFFER__ = []
def _utool_stdout():
if __UTOOL_STDOUT__ is not None:
return __UTOOL_STDOUT__
else:
return sys.stdout
def _utool_write():
if __UTOOL_WRITE__ is not None:
return __UTOOL_WRITE__
else:
return sys.stdout.write
def _utool_flush():
if __UTOOL_FLUSH__ is not None:
return __UTOOL_FLUSH__
else:
return sys.stdout.flush
def _utool_print():
if __UTOOL_PRINT__ is not None:
return __UTOOL_PRINT__
else:
return builtins.print
__STR__ = six.text_type
logdir_cacheid = 'log_dpath'
def testlogprog():
r"""
    Test to ensure that all progress lines are output to the file logger,
    while only a few progress lines are output to stdout
    (if backspace is specified).
CommandLine:
python -m utool.util_logging testlogprog --show --verb-logging
python -m utool.util_logging testlogprog --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_logging import * # NOQA
>>> import utool as ut
>>> result = testlogprog()
>>> print(result)
"""
import utool as ut
print('Starting test log function')
def test_body(count, logmode, backspace):
ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
ut.colorprint('backspace = %r' % (backspace,), 'yellow')
ut.colorprint('logmode = %r' % (logmode,), 'yellow')
if logmode:
ut.delete('test.log')
ut.start_logging('test.log')
print('Start main loop')
import time
for count in ut.ProgressIter(range(20), freq=3, backspace=backspace):
time.sleep(.01)
print('Done with main loop work')
print('Exiting main body')
if logmode:
ut.stop_logging()
#print('-----DONE LOGGING----')
testlog_text = ut.readfrom('test.log')
print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
def test_body2(count, logmode, backspace):
ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
ut.colorprint('backspace = %r' % (backspace,), 'yellow')
ut.colorprint('logmode = %r' % (logmode,), 'yellow')
if logmode:
ut.delete('test.log')
ut.start_logging('test.log')
print('Start main loop')
import time
for count in ut.ProgressIter(range(2), freq=1, backspace=backspace):
for count in ut.ProgressIter(range(50), freq=1, backspace=backspace):
time.sleep(.01)
print('Done with main loop work')
print('Exiting main body')
if logmode:
ut.stop_logging()
#print('-----DONE LOGGING----')
#testlog_text = ut.readfrom('test.log')
#print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
#test_body(0, False, True)
#test_body(1, False, False)
#test_body(2, True, True)
#test_body(3, True, False)
test_body2(4, True, True)
test_body2(5, False, True)
def ensure_logging():
flag = is_logging()
if not flag:
start_logging()
return flag
def is_logging():
global __UTOOL_ROOT_LOGGER__
flag = __UTOOL_ROOT_LOGGER__ is not None
return flag
def debug_logging_iostreams():
print(' --- <DEBUG IOSTREAMS> --')
print('__STR__ = %r' % (__STR__,))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,))
print('__CURRENT_LOG_FPATH__ = %r' % (__CURRENT_LOG_FPATH__,))
# print('__PYTHON_STDOUT__ = %r' % (__PYTHON_STDOUT__,))
# print('__PYTHON_PRINT__ = %r' % (__PYTHON_PRINT__,))
# print('__PYTHON_WRITE__ = %r' % (__PYTHON_WRITE__,))
# print('__PYTHON_FLUSH__ = %r' % (__PYTHON_FLUSH__,))
print('__UTOOL_STDOUT__ = %r' % (__UTOOL_STDOUT__,))
print('__UTOOL_PRINT__ = %r' % (__UTOOL_PRINT__,))
print('__UTOOL_FLUSH__ = %r' % (__UTOOL_FLUSH__,))
print('__UTOOL_WRITE__ = %r' % (__UTOOL_WRITE__,))
print(' --- </DEBUG IOSTREAMS> --')
def get_logging_dir(appname='default'):
"""
The default log dir is in the system resource directory
But the utool global cache allows for the user to override
where the logs for a specific app should be stored.
Returns:
log_dir_realpath (str): real path to logging directory
"""
from utool._internal import meta_util_cache
from utool._internal import meta_util_cplat
from utool import util_cache
if appname is None or appname == 'default':
appname = util_cache.get_default_appname()
resource_dpath = meta_util_cplat.get_resource_dir()
default = join(resource_dpath, appname, 'logs')
# Check global cache for a custom logging dir otherwise
# use the default.
log_dir = meta_util_cache.global_cache_read(logdir_cacheid,
appname=appname,
default=default)
log_dir_realpath = realpath(log_dir)
return log_dir_realpath
def get_shelves_dir(appname='default'):
"""
The default shelf dir is in the system resource directory
But the utool global cache allows for the user to override
where the shelf for a specific app should be stored.
Returns:
log_dir_realpath (str): real path to shelves directory
"""
from utool._internal import meta_util_cache
from utool._internal import meta_util_cplat
from utool import util_cache
if appname is None or appname == 'default':
appname = util_cache.get_default_appname()
resource_dpath = meta_util_cplat.get_resource_dir()
default = join(resource_dpath, appname, 'shelves')
# Check global cache for a custom logging dir otherwise
# use the default.
log_dir = meta_util_cache.global_cache_read(logdir_cacheid,
appname=appname,
default=default)
log_dir_realpath = realpath(log_dir)
return log_dir_realpath
def get_current_log_fpath():
global __CURRENT_LOG_FPATH__
return __CURRENT_LOG_FPATH__
def get_current_log_text():
fpath = get_current_log_fpath()
if fpath is None:
text = None
else:
with open(fpath, 'r') as file_:
text = file_.read()
return text
def get_log_fpath(num='next', appname=None, log_dir=None):
"""
Returns:
log_fpath (str): path to log file
"""
if log_dir is None:
log_dir = get_logging_dir(appname=appname)
if not exists(log_dir):
os.makedirs(log_dir)
if appname is not None:
log_fname = appname + '_logs_%04d.out'
else:
log_fname = 'utool_logs_%04d.out'
if isinstance(num, six.string_types):
if num == 'next':
count = 0
log_fpath = join(log_dir, log_fname % count)
while exists(log_fpath):
log_fpath = join(log_dir, log_fname % count)
count += 1
else:
log_fpath = join(log_dir, log_fname % num)
return log_fpath
def get_utool_logger():
return __UTOOL_ROOT_LOGGER__
def add_logging_handler(handler, format_='file'):
"""
mostly for util_logging internals
"""
global __UTOOL_ROOT_LOGGER__
if __UTOOL_ROOT_LOGGER__ is None:
builtins.print('[WARNING] logger not started, cannot add handler')
return
# create formatter and add it to the handlers
#logformat = '%Y-%m-%d %H:%M:%S'
#logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
timeformat = '%H:%M:%S'
if format_ == 'file':
logformat = '[%(asctime)s]%(message)s'
elif format_ == 'stdout':
logformat = '%(message)s'
else:
raise AssertionError('unknown logging format_: %r' % format_)
# Create formatter for handlers
formatter = logging.Formatter(logformat, timeformat)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
__UTOOL_ROOT_LOGGER__.addHandler(handler)
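# Illustrative sketch (added for clarity; not part of the original module): once
# start_logging() has created the root logger, an extra handler can be attached,
# e.g. to mirror output into a second file (the path below is hypothetical):
#
#   import logging
#   extra_fh = logging.FileHandler('/tmp/extra_utool.log', mode='a')
#   add_logging_handler(extra_fh, format_='file')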
class CustomStreamHandler(logging.Handler):
"""
Modified from logging.py
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
self.terminator = "\n"
logging.Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s%s"
if six.PY3 or not logging._unicode: # if no unicode support...
stream.write(fs % (msg, self.terminator))
else:
try:
if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)):
ufs = u'%s%s'
try:
stream.write(ufs % (msg, self.terminator))
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % (msg, self.terminator)).encode(stream.encoding))
else:
stream.write(fs % (msg, self.terminator))
except UnicodeError:
stream.write(fs % (msg.encode("UTF-8"), self.terminator.encode("UTF-8")))
#self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def start_logging(log_fpath=None, mode='a', appname='default', log_dir=None):
r"""
Overwrites utool print functions to use a logger
CommandLine:
python -m utool.util_logging --test-start_logging:0
python -m utool.util_logging --test-start_logging:1
Example0:
>>> # DISABLE_DOCTEST
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> ut.util_logging._utool_print()('hello world')
>>> ut.util_logging._utool_write()('writing1')
>>> ut.util_logging._utool_write()('writing2\n')
>>> ut.util_logging._utool_write()('writing3')
>>> ut.util_logging._utool_flush()()
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> print('current_log_text =\n%s' % (current_log_text,))
>>> assert current_log_text.find('hello world') > 0, 'cant hello world'
>>> assert current_log_text.find('writing1writing2') > 0, 'cant find writing1writing2'
>>> assert current_log_text.find('writing3') > 0, 'cant find writing3'
Example1:
>>> # DISABLE_DOCTEST
>>> # Ensure that progress is logged
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> [x for x in ut.ProgressIter(range(0, 1000), freq=4)]
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> assert current_log_text.find('rate') > 0, 'progress was not logged'
>>> print(current_log_text)
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
global __CURRENT_LOG_FPATH__
if LOGGING_VERBOSE:
print('[utool] start_logging()')
# FIXME: The test for doctest may not work
if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and not __inside_doctest():
if LOGGING_VERBOSE:
print('[utool] start_logging()... rootcheck OK')
#logging.config.dictConfig(LOGGING)
if log_fpath is None:
log_fpath = get_log_fpath(num='next', appname=appname, log_dir=log_dir)
__CURRENT_LOG_FPATH__ = log_fpath
# Print what is about to happen
if VERBOSE or LOGGING_VERBOSE:
startmsg = ('logging to log_fpath=%r' % log_fpath)
_utool_print()(startmsg)
# Create root logger
__UTOOL_ROOT_LOGGER__ = logging.getLogger('root')
__UTOOL_ROOT_LOGGER__.setLevel('DEBUG')
# create file handler which logs even debug messages
#fh = logging.handlers.WatchedFileHandler(log_fpath)
logfile_handler = logging.FileHandler(log_fpath, mode=mode)
#stdout_handler = logging.StreamHandler(__UTOOL_STDOUT__)
stdout_handler = CustomStreamHandler(__UTOOL_STDOUT__)
stdout_handler.terminator = ''
# http://stackoverflow.com/questions/7168790/suppress-newline-in-python-logging-module
#stdout_handler.terminator = ''
add_logging_handler(logfile_handler, format_='file')
add_logging_handler(stdout_handler, format_='stdout')
__UTOOL_ROOT_LOGGER__.propagate = False
__UTOOL_ROOT_LOGGER__.setLevel(logging.DEBUG)
# Overwrite utool functions with the logging functions
def utool_flush(*args):
""" flushes whatever is in the current utool write buffer """
# Flushes only the stdout handler
stdout_handler.flush()
#__UTOOL_ROOT_LOGGER__.flush()
#global __UTOOL_WRITE_BUFFER__
#if len(__UTOOL_WRITE_BUFFER__) > 0:
# msg = ''.join(__UTOOL_WRITE_BUFFER__)
# #sys.stdout.write('FLUSHING %r\n' % (len(__UTOOL_WRITE_BUFFER__)))
# __UTOOL_WRITE_BUFFER__ = []
# return __UTOOL_ROOT_LOGGER__.info(msg)
#__PYTHON_FLUSH__()
def utool_write(*args):
""" writes to current utool logs and to sys.stdout.write """
#global __UTOOL_WRITE_BUFFER__
#sys.stdout.write('WRITEING\n')
msg = ', '.join(map(six.text_type, args))
#__UTOOL_WRITE_BUFFER__.append(msg)
__UTOOL_ROOT_LOGGER__.info(msg)
#if msg.endswith('\n'):
# # Flush on newline, and remove newline
# __UTOOL_WRITE_BUFFER__[-1] = __UTOOL_WRITE_BUFFER__[-1][:-1]
# utool_flush()
#elif len(__UTOOL_WRITE_BUFFER__) > 32:
# # Flush if buffer is too large
# utool_flush()
if not PRINT_ALL_CALLERS:
def utool_print(*args):
""" standard utool print function """
#sys.stdout.write('PRINT\n')
endline = '\n'
try:
msg = ', '.join(map(six.text_type, args))
return __UTOOL_ROOT_LOGGER__.info(msg + endline)
except UnicodeDecodeError:
new_msg = ', '.join(map(meta_util_six.ensure_unicode, args))
#print(new_msg)
return __UTOOL_ROOT_LOGGER__.info(new_msg + endline)
else:
def utool_print(*args):
""" debugging utool print function """
import utool as ut
utool_flush()
endline = '\n'
__UTOOL_ROOT_LOGGER__.info('\n\n----------')
__UTOOL_ROOT_LOGGER__.info(ut.get_caller_name(range(0, 20)))
return __UTOOL_ROOT_LOGGER__.info(', '.join(map(six.text_type, args)) + endline)
def utool_printdbg(*args):
""" DRPRICATE standard utool print debug function """
return __UTOOL_ROOT_LOGGER__.debug(', '.join(map(six.text_type, args)))
# overwrite the utool printers
__UTOOL_WRITE__ = utool_write
__UTOOL_FLUSH__ = utool_flush
__UTOOL_PRINT__ = utool_print
    # Test out our shiny new logger
if VERBOSE or LOGGING_VERBOSE:
__UTOOL_PRINT__('<__LOG_START__>')
__UTOOL_PRINT__(startmsg)
else:
if LOGGING_VERBOSE:
print('[utool] start_logging()... FAILED TO START')
print('DEBUG INFO')
print('__inside_doctest() = %r' % (__inside_doctest(),))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,))
def stop_logging():
"""
Restores utool print functions to python defaults
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
if __UTOOL_ROOT_LOGGER__ is not None:
# Flush remaining buffer
if VERBOSE or LOGGING_VERBOSE:
            _utool_print()('<__LOG_STOP__>')
_utool_flush()()
# Remove handlers
for h in __UTOOL_ROOT_LOGGER__.handlers[:]:
__UTOOL_ROOT_LOGGER__.removeHandler(h)
# Reset objects
__UTOOL_ROOT_LOGGER__ = None
__UTOOL_PRINT__ = None
__UTOOL_WRITE__ = None
__UTOOL_FLUSH__ = None
# HAVE TO HACK THIS IN FOR UTOOL SPECIAL CASE ONLY
# OTHER MODULE CAN USE NOINJECT
if PRINT_INJECT_ORDER:
from utool._internal import meta_util_dbg
callername = meta_util_dbg.get_caller_name(N=1, strict=False)
fmtdict = dict(callername=callername, modname='utool.util_logging')
msg = '[util_inject] {modname} is imported by {callername}'.format(**fmtdict)
builtins.print(msg)
if __name__ == '__main__':
"""
CommandLine:
python -m utool.util_logging
python -m utool.util_logging --allexamples
python -m utool.util_logging --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
apache-2.0
|
rahushen/ansible
|
lib/ansible/modules/cloud/spotinst/spotinst_aws_elastigroup.py
|
45
|
48701
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: spotinst_aws_elastigroup
version_added: 2.5
short_description: Create, update or delete Spotinst AWS Elastigroups
author: Spotinst
description:
- Can create, update, or delete Spotinst AWS Elastigroups
Launch configuration is part of the elastigroup configuration,
so no additional modules are necessary for handling the launch configuration.
You will have to have a credentials file in this location - <home>/.spotinst/credentials
The credentials file must contain a row that looks like this
token = <YOUR TOKEN>
Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
requirements:
- spotinst >= 1.0.21
- python >= 2.7
options:
credentials_path:
description:
      - (String) Optional parameter that allows setting a non-default credentials path.
Default is ~/.spotinst/credentials
account_id:
description:
      - (String) Optional parameter that allows setting an account-id inside the module configuration
By default this is retrieved from the credentials path
availability_vs_cost:
choices:
- availabilityOriented
- costOriented
- balanced
description:
- (String) The strategy orientation.
required: true
availability_zones:
description:
- (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are
name (String),
subnet_id (String),
placement_group_name (String),
required: true
block_device_mappings:
description:
- (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
You can specify virtual devices and EBS volumes.;
'[{"key":"value", "key":"value"}]';
keys allowed are
device_name (List of Strings),
virtual_name (String),
no_device (String),
ebs (Object, expects the following keys-
delete_on_termination(Boolean),
encrypted(Boolean),
iops (Integer),
snapshot_id(Integer),
volume_type(String),
volume_size(Integer))
chef:
description:
- (Object) The Chef integration configuration.;
Expects the following keys - chef_server (String),
organization (String),
user (String),
pem_key (String),
chef_version (String)
draining_timeout:
description:
- (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
ebs_optimized:
description:
- (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
Note - additional charges will be applied.
ebs_volume_pool:
description:
- (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
'[{"key":"value", "key":"value"}]';
keys allowed are -
volume_ids (List of Strings),
device_name (String)
ecs:
description:
- (Object) The ECS integration configuration.;
Expects the following key -
cluster_name (String)
elastic_ips:
description:
- (List of Strings) List of ElasticIps Allocation Ids to associate to the group instances
fallback_to_od:
description:
- (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
health_check_grace_period:
description:
- (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
default: 300
health_check_unhealthy_duration_before_replacement:
description:
      - (Integer) Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
health_check_type:
choices:
- ELB
- HCS
- TARGET_GROUP
- MLB
- EC2
description:
- (String) The service to use for the health check.
iam_role_name:
description:
- (String) The instance profile iamRole name
- Only use iam_role_arn, or iam_role_name
iam_role_arn:
description:
- (String) The instance profile iamRole arn
- Only use iam_role_arn, or iam_role_name
id:
description:
- (String) The group id if it already exists and you want to update, or delete it.
This will not work unless the uniqueness_by field is set to id.
When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
ignore_changes:
choices:
- image_id
- target
description:
- (List of Strings) list of fields on which changes should be ignored when updating
image_id:
description:
- (String) The image Id used to launch the instance.;
In case of conflict between Instance type and image type, an error will be returned
required: true
key_pair:
description:
- (String) Specify a Key Pair to attach to the instances
required: true
kubernetes:
description:
- (Object) The Kubernetes integration configuration.
Expects the following keys -
api_server (String),
token (String)
lifetime_period:
description:
- (String) lifetime period
load_balancers:
description:
- (List of Strings) List of classic ELB names
max_size:
description:
- (Integer) The upper limit number of instances that you can scale up to
required: true
mesosphere:
description:
- (Object) The Mesosphere integration configuration.
Expects the following key -
api_server (String)
min_size:
description:
- (Integer) The lower limit number of instances that you can scale down to
required: true
monitoring:
description:
- (Boolean) Describes whether instance Enhanced Monitoring is enabled
required: true
name:
description:
- (String) Unique name for elastigroup to be created, updated or deleted
required: true
network_interfaces:
description:
- (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
description (String),
device_index (Integer),
secondary_private_ip_address_count (Integer),
associate_public_ip_address (Boolean),
delete_on_termination (Boolean),
groups (List of Strings),
network_interface_id (String),
private_ip_address (String),
subnet_id (String),
associate_ipv6_address (Boolean),
private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
on_demand_count:
description:
- (Integer) Required if risk is not set
- Number of on demand instances to launch. All other instances will be spot instances.;
Either set this parameter or the risk parameter
on_demand_instance_type:
description:
- (String) On-demand instance type that will be provisioned
required: true
opsworks:
description:
      - (Object) The elastigroup OpsWorks integration configuration.;
Expects the following key -
layer_id (String)
persistence:
description:
      - (Object) The Stateful elastigroup configuration.;
Accepts the following keys -
should_persist_root_device (Boolean),
should_persist_block_devices (Boolean),
should_persist_private_ip (Boolean)
product:
choices:
- Linux/UNIX
- SUSE Linux
- Windows
- Linux/UNIX (Amazon VPC)
- SUSE Linux (Amazon VPC)
      - Windows (Amazon VPC)
description:
      - (String) Operating system type.
required: true
rancher:
description:
- (Object) The Rancher integration configuration.;
Expects the following keys -
access_key (String),
secret_key (String),
master_host (String)
right_scale:
description:
- (Object) The Rightscale integration configuration.;
Expects the following keys -
account_id (String),
refresh_token (String)
risk:
description:
- (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
roll_config:
description:
- (Object) Roll configuration.;
If you would like the group to roll after updating, please use this feature.
Accepts the following keys -
batch_size_percentage(Integer, Required),
grace_period - (Integer, Required),
health_check_type(String, Optional)
scheduled_tasks:
description:
- (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
adjustment (Integer),
scale_target_capacity (Integer),
scale_min_capacity (Integer),
scale_max_capacity (Integer),
adjustment_percentage (Integer),
batch_size_percentage (Integer),
cron_expression (String),
frequency (String),
grace_period (Integer),
task_type (String, required),
is_enabled (Boolean)
security_group_ids:
description:
- (List of Strings) One or more security group IDs. ;
In case of update it will override the existing Security Group with the new given array
required: true
shutdown_script:
description:
- (String) The Base64-encoded shutdown script that executes prior to instance termination.
Encode before setting.
signals:
description:
- (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
keys allowed are -
name (String, required),
timeout (Integer)
spin_up_time:
description:
- (Integer) spin up time, in seconds, for the instance
spot_instance_types:
description:
- (List of Strings) Spot instance type that will be provisioned.
required: true
state:
choices:
- present
- absent
description:
- (String) create or delete the elastigroup
tags:
description:
      - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
target:
description:
- (Integer) The number of instances to launch
required: true
target_group_arns:
description:
- (List of Strings) List of target group arns instances should be registered to
tenancy:
choices:
- default
- dedicated
description:
- (String) dedicated vs shared tenancy
terminate_at_end_of_billing_hour:
description:
- (Boolean) terminate at the end of billing hour
unit:
choices:
- instance
- weight
description:
- (String) The capacity unit to launch instances by.
required: true
up_scaling_policies:
description:
- (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
          statistic (String, required),
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
min_target_capacity (String),
target (String),
maximum (String),
minimum (String)
down_scaling_policies:
description:
- (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
statistic (String, required),
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
max_target_capacity (String),
target (String),
maximum (String),
minimum (String)
target_tracking_policies:
description:
- (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
source (String, required),
metric_name (String, required),
statistic (String, required),
unit (String, required),
cooldown (String, required),
target (String, required)
uniqueness_by:
choices:
- id
- name
description:
- (String) If your group names are not unique, you may use this feature to update or delete a specific group.
Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
user_data:
description:
- (String) Base64-encoded MIME user data. Encode before setting the value.
utilize_reserved_instances:
description:
- (Boolean) In case of any available Reserved Instances,
Elastigroup will utilize your reservations before purchasing Spot instances.
wait_for_instances:
description:
- (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin
wait_timeout:
description:
- (Integer) How long the module should wait for instances before failing the action.;
Only works if wait_for_instances is True.
"""
EXAMPLES = '''
# Basic configuration YAML example
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/sda1'
ebs:
volume_size: 100
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
# In organizations with more than one account, it is required to specify an account_id
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/xvda'
ebs:
volume_size: 60
volume_type: gp2
- device_name: '/dev/xvdb'
ebs:
volume_size: 120
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example we have set up block device mapping with ephemeral devices
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
block_device_mappings:
- device_name: '/dev/xvda'
virtual_name: ephemeral0
- device_name: '/dev/xvdb/'
virtual_name: ephemeral1
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a network interface defined.
# Each network interface must have a device index
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
network_interfaces:
- associate_public_ip_address: true
device_index: 0
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a target tracking scaling policy defined
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
account_id: act-92d45673
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-79da021e
image_id: ami-f173cc91
fallback_to_od: true
tags:
- Creator: ValueOfCreatorTag
- Environment: ValueOfEnvironmentTag
key_pair: spotinst-labs-oregon
max_size: 10
min_size: 0
target: 2
unit: instance
monitoring: True
name: ansible-group-1
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-46cdc13d
spot_instance_types:
- c3.large
target_tracking_policies:
- policy_name: target-tracking-1
namespace: AWS/EC2
metric_name: CPUUtilization
statistic: average
unit: percent
target: 50
cooldown: 120
do_not_update:
- image_id
register: result
- debug: var=result
'''
RETURN = '''
---
instances:
description: List of active elastigroup instances and their details.
returned: success
type: dict
sample: [
{
"spotInstanceRequestId": "sir-regs25zp",
"instanceId": "i-09640ad8678234c",
"instanceType": "m4.large",
"product": "Linux/UNIX",
"availabilityZone": "us-west-2b",
"privateIp": "180.0.2.244",
"createdAt": "2017-07-17T12:46:18.000Z",
"status": "fulfilled"
}
]
group_id:
description: Created / Updated group's ID.
returned: success
type: string
sample: "sig-12345"
'''
HAS_SPOTINST_SDK = False
__metaclass__ = type
import os
import time
from ansible.module_utils.basic import AnsibleModule
try:
import spotinst
from spotinst import SpotinstClientException
HAS_SPOTINST_SDK = True
except ImportError:
pass
eni_fields = ('description',
'device_index',
'secondary_private_ip_address_count',
'associate_public_ip_address',
'delete_on_termination',
'groups',
'network_interface_id',
'private_ip_address',
'subnet_id',
'associate_ipv6_address')
private_ip_fields = ('private_ip_address',
'primary')
capacity_fields = (dict(ansible_field_name='min_size',
spotinst_field_name='minimum'),
dict(ansible_field_name='max_size',
spotinst_field_name='maximum'),
'target',
'unit')
lspec_fields = ('user_data',
'key_pair',
'tenancy',
'shutdown_script',
'monitoring',
'ebs_optimized',
'image_id',
'health_check_type',
'health_check_grace_period',
'health_check_unhealthy_duration_before_replacement',
'security_group_ids')
iam_fields = (dict(ansible_field_name='iam_role_name',
spotinst_field_name='name'),
dict(ansible_field_name='iam_role_arn',
spotinst_field_name='arn'))
scheduled_task_fields = ('adjustment',
'adjustment_percentage',
'batch_size_percentage',
'cron_expression',
'frequency',
'grace_period',
'task_type',
'is_enabled',
'scale_target_capacity',
'scale_min_capacity',
'scale_max_capacity')
scaling_policy_fields = ('policy_name',
'namespace',
'metric_name',
'dimensions',
'statistic',
'evaluation_periods',
'period',
'threshold',
'cooldown',
'unit',
'operator')
tracking_policy_fields = ('policy_name',
'namespace',
'source',
'metric_name',
'statistic',
'unit',
'cooldown',
'target',
'threshold')
action_fields = (dict(ansible_field_name='action_type',
spotinst_field_name='type'),
'adjustment',
'min_target_capacity',
'max_target_capacity',
'target',
'minimum',
'maximum')
signal_fields = ('name',
'timeout')
multai_lb_fields = ('balancer_id',
'project_id',
'target_set_id',
'az_awareness',
'auto_weight')
persistence_fields = ('should_persist_root_device',
'should_persist_block_devices',
'should_persist_private_ip')
strategy_fields = ('risk',
'utilize_reserved_instances',
'fallback_to_od',
'on_demand_count',
'availability_vs_cost',
'draining_timeout',
'spin_up_time',
'lifetime_period')
ebs_fields = ('delete_on_termination',
'encrypted',
'iops',
'snapshot_id',
'volume_type',
'volume_size')
bdm_fields = ('device_name',
'virtual_name',
'no_device')
kubernetes_fields = ('api_server',
'token')
right_scale_fields = ('account_id',
'refresh_token')
rancher_fields = ('access_key',
'secret_key',
'master_host')
chef_fields = ('chef_server',
'organization',
'user',
'pem_key',
'chef_version')
az_fields = ('name',
'subnet_id',
'placement_group_name')
opsworks_fields = ('layer_id',)
scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
mesosphere_fields = ('api_server',)
ecs_fields = ('cluster_name',)
multai_fields = ('multai_token',)
def handle_elastigroup(client, module):
has_changed = False
should_create = False
group_id = None
message = 'None'
name = module.params.get('name')
state = module.params.get('state')
uniqueness_by = module.params.get('uniqueness_by')
external_group_id = module.params.get('id')
if uniqueness_by == 'id':
if external_group_id is None:
should_create = True
else:
should_create = False
group_id = external_group_id
else:
groups = client.get_elastigroups()
should_create, group_id = find_group_with_same_name(groups, name)
if should_create is True:
if state == 'present':
eg = expand_elastigroup(module, is_update=False)
module.debug(str(" [INFO] " + message + "\n"))
group = client.create_elastigroup(group=eg)
group_id = group['id']
            message = 'Created group successfully.'
has_changed = True
elif state == 'absent':
message = 'Cannot delete non-existent group.'
has_changed = False
else:
eg = expand_elastigroup(module, is_update=True)
if state == 'present':
group = client.update_elastigroup(group_update=eg, group_id=group_id)
message = 'Updated group successfully.'
try:
roll_config = module.params.get('roll_config')
if roll_config:
eg_roll = spotinst.aws_elastigroup.Roll(
batch_size_percentage=roll_config.get('batch_size_percentage'),
grace_period=roll_config.get('grace_period'),
health_check_type=roll_config.get('health_check_type')
)
roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
message = 'Updated and started rolling the group successfully.'
except SpotinstClientException as exc:
message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
has_changed = True
elif state == 'absent':
try:
client.delete_elastigroup(group_id=group_id)
except SpotinstClientException as exc:
if "GROUP_DOESNT_EXIST" in exc.message:
pass
else:
module.fail_json(msg="Error while attempting to delete group : " + exc.message)
message = 'Deleted group successfully.'
has_changed = True
return group_id, message, has_changed
def retrieve_group_instances(client, module, group_id):
wait_timeout = module.params.get('wait_timeout')
wait_for_instances = module.params.get('wait_for_instances')
if wait_timeout is None:
wait_timeout = 300
wait_timeout = time.time() + wait_timeout
target = module.params.get('target')
state = module.params.get('state')
instances = list()
if state == 'present' and group_id is not None and wait_for_instances is True:
is_amount_fulfilled = False
while is_amount_fulfilled is False and wait_timeout > time.time():
instances = list()
amount_of_fulfilled_instances = 0
active_instances = client.get_elastigroup_active_instances(group_id=group_id)
for active_instance in active_instances:
if active_instance.get('private_ip') is not None:
amount_of_fulfilled_instances += 1
instances.append(active_instance)
if amount_of_fulfilled_instances >= target:
is_amount_fulfilled = True
time.sleep(10)
return instances
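# Returns a (should_create, group_id) pair: should_create is True when no
# existing group carries the requested name; otherwise the matching group id
# is returned so the caller can update or delete it.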
def find_group_with_same_name(groups, name):
for group in groups:
if group['name'] == name:
return False, group.get('id')
return True, None
def expand_elastigroup(module, is_update):
do_not_update = module.params['do_not_update']
name = module.params.get('name')
eg = spotinst.aws_elastigroup.Elastigroup()
description = module.params.get('description')
if name is not None:
eg.name = name
if description is not None:
eg.description = description
# Capacity
expand_capacity(eg, module, is_update, do_not_update)
# Strategy
expand_strategy(eg, module)
# Scaling
expand_scaling(eg, module)
# Third party integrations
expand_integrations(eg, module)
# Compute
expand_compute(eg, module, is_update, do_not_update)
# Multai
expand_multai(eg, module)
# Scheduling
expand_scheduled_tasks(eg, module)
return eg
def expand_compute(eg, module, is_update, do_not_update):
elastic_ips = module.params['elastic_ips']
on_demand_instance_type = module.params.get('on_demand_instance_type')
spot_instance_types = module.params['spot_instance_types']
ebs_volume_pool = module.params['ebs_volume_pool']
availability_zones_list = module.params['availability_zones']
product = module.params.get('product')
eg_compute = spotinst.aws_elastigroup.Compute()
if product is not None:
# Only put product on group creation
if is_update is not True:
eg_compute.product = product
if elastic_ips is not None:
eg_compute.elastic_ips = elastic_ips
    if on_demand_instance_type is not None or spot_instance_types is not None:
        eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
        if spot_instance_types is not None:
            eg_instance_types.spot = spot_instance_types
        if on_demand_instance_type is not None:
            eg_instance_types.ondemand = on_demand_instance_type
if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
eg_compute.instance_types = eg_instance_types
expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
expand_launch_spec(eg_compute, module, is_update, do_not_update)
eg.compute = eg_compute
def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
if ebs_volumes_list is not None:
eg_volumes = []
for volume in ebs_volumes_list:
eg_volume = spotinst.aws_elastigroup.EbsVolume()
if volume.get('device_name') is not None:
eg_volume.device_name = volume.get('device_name')
if volume.get('volume_ids') is not None:
eg_volume.volume_ids = volume.get('volume_ids')
if eg_volume.device_name is not None:
eg_volumes.append(eg_volume)
if len(eg_volumes) > 0:
eg_compute.ebs_volume_pool = eg_volumes
def expand_launch_spec(eg_compute, module, is_update, do_not_update):
eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
tags = module.params['tags']
load_balancers = module.params['load_balancers']
target_group_arns = module.params['target_group_arns']
block_device_mappings = module.params['block_device_mappings']
network_interfaces = module.params['network_interfaces']
if is_update is True:
if 'image_id' in do_not_update:
delattr(eg_launch_spec, 'image_id')
expand_tags(eg_launch_spec, tags)
expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
expand_block_device_mappings(eg_launch_spec, block_device_mappings)
expand_network_interfaces(eg_launch_spec, network_interfaces)
eg_compute.launch_specification = eg_launch_spec
def expand_integrations(eg, module):
rancher = module.params.get('rancher')
mesosphere = module.params.get('mesosphere')
ecs = module.params.get('ecs')
kubernetes = module.params.get('kubernetes')
right_scale = module.params.get('right_scale')
opsworks = module.params.get('opsworks')
chef = module.params.get('chef')
integration_exists = False
eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
if mesosphere is not None:
eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
integration_exists = True
if ecs is not None:
eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
integration_exists = True
if kubernetes is not None:
eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
integration_exists = True
if right_scale is not None:
eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
integration_exists = True
if opsworks is not None:
eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
integration_exists = True
if rancher is not None:
eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
integration_exists = True
if chef is not None:
eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
integration_exists = True
if integration_exists:
eg.third_parties_integration = eg_integrations
def expand_capacity(eg, module, is_update, do_not_update):
eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
if is_update is True:
delattr(eg_capacity, 'unit')
if 'target' in do_not_update:
delattr(eg_capacity, 'target')
eg.capacity = eg_capacity
def expand_strategy(eg, module):
persistence = module.params.get('persistence')
signals = module.params.get('signals')
eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
if terminate_at_end_of_billing_hour is not None:
eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
module.params, 'ScalingStrategy')
if persistence is not None:
eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
if signals is not None:
eg_signals = expand_list(signals, signal_fields, 'Signal')
if len(eg_signals) > 0:
eg_strategy.signals = eg_signals
eg.strategy = eg_strategy
def expand_multai(eg, module):
multai_load_balancers = module.params.get('multai_load_balancers')
eg_multai = expand_fields(multai_fields, module.params, 'Multai')
if multai_load_balancers is not None:
eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
if len(eg_multai_load_balancers) > 0:
eg_multai.balancers = eg_multai_load_balancers
eg.multai = eg_multai
def expand_scheduled_tasks(eg, module):
scheduled_tasks = module.params.get('scheduled_tasks')
if scheduled_tasks is not None:
eg_scheduling = spotinst.aws_elastigroup.Scheduling()
eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
if len(eg_tasks) > 0:
eg_scheduling.tasks = eg_tasks
eg.scheduling = eg_scheduling
def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
if load_balancers is not None or target_group_arns is not None:
eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
eg_total_lbs = []
if load_balancers is not None:
for elb_name in load_balancers:
eg_elb = spotinst.aws_elastigroup.LoadBalancer()
if elb_name is not None:
eg_elb.name = elb_name
eg_elb.type = 'CLASSIC'
eg_total_lbs.append(eg_elb)
if target_group_arns is not None:
for target_arn in target_group_arns:
eg_elb = spotinst.aws_elastigroup.LoadBalancer()
if target_arn is not None:
eg_elb.arn = target_arn
eg_elb.type = 'TARGET_GROUP'
eg_total_lbs.append(eg_elb)
if len(eg_total_lbs) > 0:
eg_load_balancers_config.load_balancers = eg_total_lbs
eg_launchspec.load_balancers_config = eg_load_balancers_config
def expand_tags(eg_launchspec, tags):
if tags is not None:
eg_tags = []
for tag in tags:
eg_tag = spotinst.aws_elastigroup.Tag()
if tag.keys():
eg_tag.tag_key = tag.keys()[0]
if tag.values():
eg_tag.tag_value = tag.values()[0]
eg_tags.append(eg_tag)
if len(eg_tags) > 0:
eg_launchspec.tags = eg_tags
def expand_block_device_mappings(eg_launchspec, bdms):
if bdms is not None:
eg_bdms = []
for bdm in bdms:
eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
if bdm.get('ebs') is not None:
eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
eg_bdms.append(eg_bdm)
if len(eg_bdms) > 0:
eg_launchspec.block_device_mappings = eg_bdms
def expand_network_interfaces(eg_launchspec, enis):
if enis is not None:
eg_enis = []
for eni in enis:
eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
if eg_pias is not None:
eg_eni.private_ip_addresses = eg_pias
eg_enis.append(eg_eni)
if len(eg_enis) > 0:
eg_launchspec.network_interfaces = eg_enis
def expand_scaling(eg, module):
up_scaling_policies = module.params['up_scaling_policies']
down_scaling_policies = module.params['down_scaling_policies']
target_tracking_policies = module.params['target_tracking_policies']
eg_scaling = spotinst.aws_elastigroup.Scaling()
if up_scaling_policies is not None:
eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
if len(eg_up_scaling_policies) > 0:
eg_scaling.up = eg_up_scaling_policies
if down_scaling_policies is not None:
eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
if len(eg_down_scaling_policies) > 0:
eg_scaling.down = eg_down_scaling_policies
if target_tracking_policies is not None:
eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
if len(eg_target_tracking_policies) > 0:
eg_scaling.target = eg_target_tracking_policies
if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
eg.scaling = eg_scaling
def expand_list(items, fields, class_name):
if items is not None:
new_objects_list = []
for item in items:
new_obj = expand_fields(fields, item, class_name)
new_objects_list.append(new_obj)
return new_objects_list
def expand_fields(fields, item, class_name):
class_ = getattr(spotinst.aws_elastigroup, class_name)
new_obj = class_()
# Handle primitive fields
if item is not None:
for field in fields:
if isinstance(field, dict):
ansible_field_name = field['ansible_field_name']
spotinst_field_name = field['spotinst_field_name']
else:
ansible_field_name = field
spotinst_field_name = field
if item.get(ansible_field_name) is not None:
setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
return new_obj
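# Illustrative example (hypothetical input, relying on the field tuples above):
#     expand_fields(iam_fields, {'iam_role_name': 'my-role'}, 'IamRole')
# builds a spotinst.aws_elastigroup.IamRole object with .name == 'my-role',
# because the dict entry in iam_fields maps the ansible field name
# 'iam_role_name' onto the SDK attribute name 'name'.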
def expand_scaling_policies(scaling_policies):
eg_scaling_policies = []
for policy in scaling_policies:
eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
eg_scaling_policies.append(eg_policy)
return eg_scaling_policies
def expand_target_tracking_policies(tracking_policies):
eg_tracking_policies = []
for policy in tracking_policies:
eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
eg_tracking_policies.append(eg_policy)
return eg_tracking_policies
def main():
fields = dict(
account_id=dict(type='str'),
availability_vs_cost=dict(type='str', required=True),
availability_zones=dict(type='list', required=True),
block_device_mappings=dict(type='list'),
chef=dict(type='dict'),
credentials_path=dict(type='path', default="~/.spotinst/credentials"),
do_not_update=dict(default=[], type='list'),
down_scaling_policies=dict(type='list'),
draining_timeout=dict(type='int'),
ebs_optimized=dict(type='bool'),
ebs_volume_pool=dict(type='list'),
ecs=dict(type='dict'),
elastic_beanstalk=dict(type='dict'),
elastic_ips=dict(type='list'),
fallback_to_od=dict(type='bool'),
id=dict(type='str'),
health_check_grace_period=dict(type='int'),
health_check_type=dict(type='str'),
health_check_unhealthy_duration_before_replacement=dict(type='int'),
iam_role_arn=dict(type='str'),
iam_role_name=dict(type='str'),
image_id=dict(type='str', required=True),
key_pair=dict(type='str'),
kubernetes=dict(type='dict'),
lifetime_period=dict(type='int'),
load_balancers=dict(type='list'),
max_size=dict(type='int', required=True),
mesosphere=dict(type='dict'),
min_size=dict(type='int', required=True),
monitoring=dict(type='str'),
multai_load_balancers=dict(type='list'),
multai_token=dict(type='str'),
name=dict(type='str', required=True),
network_interfaces=dict(type='list'),
on_demand_count=dict(type='int'),
on_demand_instance_type=dict(type='str'),
opsworks=dict(type='dict'),
persistence=dict(type='dict'),
product=dict(type='str', required=True),
rancher=dict(type='dict'),
right_scale=dict(type='dict'),
risk=dict(type='int'),
roll_config=dict(type='dict'),
scheduled_tasks=dict(type='list'),
security_group_ids=dict(type='list', required=True),
shutdown_script=dict(type='str'),
signals=dict(type='list'),
spin_up_time=dict(type='int'),
spot_instance_types=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list'),
target=dict(type='int', required=True),
target_group_arns=dict(type='list'),
tenancy=dict(type='str'),
terminate_at_end_of_billing_hour=dict(type='bool'),
token=dict(type='str'),
unit=dict(type='str'),
user_data=dict(type='str'),
utilize_reserved_instances=dict(type='bool'),
uniqueness_by=dict(default='name', choices=['name', 'id']),
up_scaling_policies=dict(type='list'),
target_tracking_policies=dict(type='list'),
wait_for_instances=dict(type='bool', default=False),
wait_timeout=dict(type='int')
)
module = AnsibleModule(argument_spec=fields)
if not HAS_SPOTINST_SDK:
module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst)")
# Retrieve creds file variables
creds_file_loaded_vars = dict()
credentials_path = module.params.get('credentials_path')
try:
with open(credentials_path, "r") as creds:
for line in creds:
eq_index = line.find('=')
var_name = line[:eq_index].strip()
string_value = line[eq_index + 1:].strip()
creds_file_loaded_vars[var_name] = string_value
except IOError:
pass
# End of creds file retrieval
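    # Credential resolution order below: the module parameter wins, then the
    # environment variable, then the value parsed from the credentials file.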
token = module.params.get('token')
if not token:
token = os.environ.get('SPOTINST_TOKEN')
if not token:
token = creds_file_loaded_vars.get("token")
account = module.params.get('account_id')
if not account:
account = os.environ.get('ACCOUNT')
if not account:
account = creds_file_loaded_vars.get("account")
client = spotinst.SpotinstClient(auth_token=token, print_output=False)
if account is not None:
client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
group_id, message, has_changed = handle_elastigroup(client=client, module=module)
instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
if __name__ == '__main__':
main()
|
gpl-3.0
|
zhjunlang/kbengine
|
kbe/res/scripts/common/Lib/codecs.py
|
84
|
35956
|
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
"""Codec details when looking up the codec registry"""
# Private API to allow Python 3.4 to blacklist the known non-Unicode
# codecs in the standard library. A more general mechanism to
# reliably distinguish test encodings from other codecs will hopefully
# be defined for Python 3.5
#
# See http://bugs.python.org/issue19619
_is_text_encoding = True # Assume codecs are text encodings by default
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None,
*, _is_text_encoding=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
if _is_text_encoding is not None:
self._is_text_encoding = _is_text_encoding
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
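# Illustrative sketch (not part of the original file): the error handling
# schemes documented above can be exercised through the builtin codecs, e.g.
#     'abc\u20ac'.encode('ascii', 'ignore')            == b'abc'
#     'abc\u20ac'.encode('ascii', 'replace')           == b'abc?'
#     'abc\u20ac'.encode('ascii', 'xmlcharrefreplace') == b'abc&#8364;'
#     b'abc\xff'.decode('ascii', 'replace')            == 'abc\ufffd'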
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
        Creates an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
        Decodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
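# Illustrative sketch (not part of the original file): an incremental decoder
# buffers incomplete byte sequences between calls, e.g. with the utf-8 codec
# provided by the encodings package:
#     d = codecs.getincrementaldecoder('utf-8')()
#     d.decode(b'\xe2\x82')            # returns '' (incomplete sequence buffered)
#     d.decode(b'\xac', final=True)    # returns '\u20ac'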
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
elif size >= 0:
if len(self.charbuffer) >= size:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
if not data:
break
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
            way of finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
    In the other direction, data is read from the stream using a
    Reader instance and the encoded data is then returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
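# Illustrative sketch, not part of the original module: one way open() and
# EncodedFile() above are typically combined. The filename "example.txt" is a
# hypothetical placeholder.
def _example_encoded_file_usage():
    # Write some text through the transparent UTF-8 wrapper returned by open().
    with open('example.txt', 'w', encoding='utf-8') as f:
        f.write('caf\xe9\n')
    # Re-open the raw file and recode its UTF-8 contents to Latin-1 on read.
    raw = builtins.open('example.txt', 'rb')
    wrapped = EncodedFile(raw, data_encoding='latin-1', file_encoding='utf-8')
    data = wrapped.read()   # b'caf\xe9\n' -- bytes re-encoded to Latin-1
    wrapped.close()
    return data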
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
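# Illustrative sketch, not part of the original module: using the lookup
# helpers above to fetch a codec's stateless encode/decode functions.
def _example_lookup_helpers():
    encode = getencoder('utf-8')
    decode = getdecoder('utf-8')
    data, consumed = encode('caf\xe9')   # (b'caf\xc3\xa9', 4)
    text, nbytes = decode(data)          # ('caf\xe9', 5)
    assert text == 'caf\xe9'
    return data, text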
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
    Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
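# Illustrative sketch, not part of the original module: round-tripping an
# iterator of text chunks through iterencode()/iterdecode().
def _example_iter_roundtrip():
    chunks = ['abc', '\xe9', 'def']
    encoded = list(iterencode(chunks, 'utf-8'))    # byte chunks
    decoded = list(iterdecode(encoded, 'utf-8'))   # text chunks again
    assert ''.join(decoded) == 'abc\xe9def'
    return decoded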
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
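# Illustrative sketch, not part of the original module: how the two helpers
# above combine for a tiny three-code-point charmap.
def _example_charmap_maps():
    decoding_map = make_identity_dict(range(3))   # {0: 0, 1: 1, 2: 2}
    decoding_map[2] = 1                           # two code points decode to 1
    encoding_map = make_encoding_map(decoding_map)
    # 1 is targeted twice, so encoding it is undefined (mapped to None).
    assert encoding_map == {0: 0, 1: None}
    return encoding_map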
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
lgpl-3.0
|
Stymphalian/sublime-plugins
|
KeyMapQuery/KeyMapQuery.py
|
1
|
8716
|
import sublime, sublime_plugin
import re
from collections import namedtuple
import os.path
"""
KeyMapQueryCommand allows you to quickly query if a key-binding is bound.
A combo-box will appear displaying a list of bound key-bindings. Type a key-combination
into the input box to narrow the results (i.e. ctrl+k,ctrl+i).
If there is a conflict in key-bindings, by default, the highest precedence match
is shown lower in the list.
i.e. if ctrl+o is bound in two files.
["ctrl+o" : command 1]
["ctrl+o" : command 2] <-- this is the one which actually gets used.
"""
class KeyMapQueryCommand(sublime_plugin.WindowCommand):
"""
InternalObject holds state during the execution of the command.
"""
class InternalObject(object):
KeyMap = namedtuple("KeyMap",["filename","bindings"])
def __init__(self):
self.keymaps = []
self.single_array = []
self.settings = sublime.load_settings("KeyMapQuery.sublime-settings")
def get_key_binding(self,index):
s = self.single_array[index]
return s.split(":")[0]
def get_relative_index(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return index - count
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# given an index from the on_select() callback
# determine the sublime-keymap filename which it belongs to.
def get_filename(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return d.filename
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# Given the keymap files we loaded in, flatten them into
# a single array of strings to be used by the window.show_quick_panel()
def get_string_list_of_keymaps(self):
# flatten the keymaps into a single array contains only the keybinding object
rs = []
for d in self.keymaps:
rs.extend(d.bindings)
# convert each key-binding into a string
# The format should look like
# ["ctrl+i","ctrl+j"] : command_to_be_run_1
# ["ctrl+i","ctrl+k"] : command_to_be_run_2
# ["ctrl+i","ctrl+l"] : command_to_be_run_3
def str_format(obj):
objs = map(lambda x: '"' + x +'"', obj["keys"])
return "{:30s} : {}".format("["+ ",".join(objs) + "]",obj["command"])
self.single_array = list(map(str_format,rs))
return self.single_array
# Load all the sublime-keymap files that are known to sublime.
# This includes keymap files zipped inside sublime-package directories.
def load_keymaps(self,file_re):
# Get all the keymap filenames
all_keymap_files = sublime.find_resources("*.sublime-keymap")
# sort them, such as described by the merging/precedence rules defined
# http://docs.sublimetext.info/en/latest/extensibility/packages.html?highlight=precedence
all_keymap_files.sort()
if self.settings.get("reverse_sort_order"):
all_keymap_files.reverse()
filtered_files = list(filter(lambda x : re.match(file_re,x) != None,all_keymap_files))
# Load the keymap files; decode them into pyobjects;
# and then convert them into KeyMap tuples
def mapToPythonObj(filename):
res = sublime.load_resource(filename)
# assumption is that the decoded json is represented as
# a python array of dictionaries.
return self.KeyMap(filename,sublime.decode_value(res))
self.keymaps = list(map(mapToPythonObj,filtered_files))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self,window):
self.window = window
self.file_re = self._get_keymap_regex(sublime.platform())
self.state = None
def run(self):
self.state = self.InternalObject()
self.state.load_keymaps(self.file_re)
input_array = self.state.get_string_list_of_keymaps()
view = self.window.show_quick_panel(
input_array,
flags=sublime.MONOSPACE_FONT,
selected_index=0,
on_select= self.on_select,
on_highlight=None)
# on_highlight=self.on_highlight)
def _get_keymap_regex(self,platform):
if( platform == "windows"):
file_re = re.compile(r'(.*(Default \(Windows\)|Default)\.sublime-keymap)')
elif (platform == "linux"):
file_re = re.compile(r'(.*(Default \(Linux\)|Default)\.sublime-keymap)')
else:
file_re = re.compile(r'(.*(Default \(OSX\)|Default)\.sublime-keymap)')
return file_re
def on_highlight(self,value):
if value == -1:
return
def on_select(self,value):
if value == -1:
return
# open the keymap file.
filename = self.state.get_filename(value)
split_filename = "/".join(filename.split("/")[1:])
# This fucking sucks. I would really like to use the sublime API open_file()
# directly. This would get me a direct ref to the View object and allow me
# to set the cursor to the proper position to show the hotkey binding.
# There are a few problems with this:
        # i) sublime-packages are compressed (zip).
        # ii) packages are stored in different folders (packages, packages/user, etc)
        # and they are all different on different architectures.
        # iii) Changing the sel() on a view doesn't update the cursor position
        # on the screen. Not sure if this is a bug, but I think it is
        # because we aren't making edits using an Edit object. Therefore
# any changes that we make aren't known/shown until some user
# interaction
# Because of these problems I use the following hack.
# 1.) Open the file using the window.run_command, and use the ${packages}
        # variable substitution. The internal sublime api automatically finds and
        # uncompresses all the files for me. I don't have to deal with anything
        # and the proper file gets opened (assuming it exists).
# 2.) The pit-fall to this is that I don't get a direct ref to the View
# that is created/opened. This means that I can't set the cursor position.
        # Additionally, because the run_command is async, I don't even know when
# the View gets fully loaded (i.e. I can't use window.active_view())
        # 3.) To get around this problem, I create a helper TextCommand class.
        # The purpose of this class is to position the cursor on the view.
        # (i.e. find_all() on a regex string). This is hacky because it pollutes
# the command namespace. This only solves the problem of being able to
# set the cursor position. I still have to use a set_timeout() in order
# to "ensure" the file is opened before I issue the command.
self.window.run_command("open_file",{"file":"${packages}/"+split_filename})
def inner():
self.window.run_command("move_cursor_to_pattern",
{"pattern":r'"keys"\s*:\s*\[',
"index":self.state.get_relative_index(value)})
        # TODO: extract settings into my own class, which allows you to specify defaults
delay= self.state.settings.get("timeout_delay")
if(delay == None):
delay = 250
sublime.set_timeout(inner,delay)
# A Helper command used to move the cursor to the beginning/end of
# a regex pattern in the view.
class MoveCursorToPatternCommand(sublime_plugin.TextCommand):
def run(self,edit,pattern,index=0):
r = self.view.find_all(pattern)
if index < 0 or index >= len(r):
print("Pattern not found \"{}\"".format(pattern))
return
r = r[index]
self.view.show_at_center(r)
sel = self.view.sel()
sel.clear()
sel.add(sublime.Region(r.b,r.b))
|
mit
|
alfredodeza/execnet
|
execnet/gateway_io.py
|
1
|
7538
|
# -*- coding: utf-8 -*-
"""
execnet io initialization code
creates io instances used for gateway io
"""
import os
import shlex
import sys
try:
from execnet.gateway_base import Popen2IO, Message
except ImportError:
from __main__ import Popen2IO, Message
from functools import partial
class Popen2IOMaster(Popen2IO):
def __init__(self, args, execmodel):
self.popen = p = execmodel.PopenPiped(args)
Popen2IO.__init__(self, p.stdin, p.stdout, execmodel=execmodel)
def wait(self):
try:
return self.popen.wait()
except OSError:
pass # subprocess probably dead already
def kill(self):
killpopen(self.popen)
def killpopen(popen):
try:
if hasattr(popen, "kill"):
popen.kill()
else:
killpid(popen.pid)
except EnvironmentError:
sys.stderr.write("ERROR killing: %s\n" % (sys.exc_info()[1]))
sys.stderr.flush()
def killpid(pid):
if hasattr(os, "kill"):
os.kill(pid, 15)
elif sys.platform == "win32" or getattr(os, "_name", None) == "nt":
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
raise EnvironmentError("no method to kill {}".format(pid))
popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))"
def shell_split_path(path):
"""
Use shell lexer to split the given path into a list of components,
taking care to handle Windows' '\' correctly.
"""
if sys.platform.startswith("win"):
# replace \\ by / otherwise shlex will strip them out
path = path.replace("\\", "/")
return shlex.split(path)
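# Illustrative sketch, not part of the original module: quoting keeps a path
# with spaces as a single component; on win32 hosts backslashes are first
# normalized, so r"C:\Python27\python.exe" becomes ["C:/Python27/python.exe"].
def _example_shell_split_path():
    parts = shell_split_path('"/opt/my python/bin/python" -E')
    assert parts == ["/opt/my python/bin/python", "-E"]
    return parts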
def popen_args(spec):
args = shell_split_path(spec.python) if spec.python else [sys.executable]
args.append("-u")
if spec is not None and spec.dont_write_bytecode:
args.append("-B")
# Slight gymnastics in ordering these arguments because CPython (as of
# 2.7.1) ignores -B if you provide `python -c "something" -B`
args.extend(["-c", popen_bootstrapline])
return args
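# Illustrative sketch, not part of the original module: the argument vector
# built for a plain local popen spec (no python override, bytecode allowed).
# _FakeSpec is a hypothetical stand-in for a real gateway spec object.
def _example_popen_args():
    class _FakeSpec(object):
        python = None
        dont_write_bytecode = False
    args = popen_args(_FakeSpec())
    # -> [sys.executable, "-u", "-c", popen_bootstrapline]
    return args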
def ssh_args(spec):
# NOTE: If changing this, you need to sync those changes to vagrant_args
# as well, or, take some time to further refactor the commonalities of
# ssh_args and vagrant_args.
remotepython = spec.python or "python"
args = ["ssh", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
args.extend(spec.ssh.split())
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.append(remotecmd)
return args
def vagrant_ssh_args(spec):
    # This is the vagrant-wrapped version of SSH. Unfortunately the
    # command lines are too different to simply be channelled through ssh_args
    # due to ordering/templating issues.
    # NOTE: This should be kept in sync with the ssh_args behaviour.
    # spec.vagrant_ssh is identical to spec.ssh in that they both carry
# the remote host "address".
remotepython = spec.python or "python"
args = ["vagrant", "ssh", spec.vagrant_ssh, "--", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.extend([remotecmd])
return args
def create_io(spec, execmodel):
if spec.popen:
args = popen_args(spec)
return Popen2IOMaster(args, execmodel)
if spec.ssh:
args = ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.ssh
return io
if spec.vagrant_ssh:
args = vagrant_ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.vagrant_ssh
return io
#
# Proxy Gateway handling code
#
# master: proxy initiator
# forwarder: forwards between master and sub
# sub: sub process that is proxied to the initiator
RIO_KILL = 1
RIO_WAIT = 2
RIO_REMOTEADDRESS = 3
RIO_CLOSE_WRITE = 4
class ProxyIO(object):
""" A Proxy IO object allows to instantiate a Gateway
through another "via" gateway. A master:ProxyIO object
provides an IO object effectively connected to the sub
via the forwarder. To achieve this, master:ProxyIO interacts
with forwarder:serve_proxy_io() which itself
instantiates and interacts with the sub.
"""
def __init__(self, proxy_channel, execmodel):
# after exchanging the control channel we use proxy_channel
# for messaging IO
self.controlchan = proxy_channel.gateway.newchannel()
proxy_channel.send(self.controlchan)
self.iochan = proxy_channel
self.iochan_file = self.iochan.makefile("r")
self.execmodel = execmodel
def read(self, nbytes):
return self.iochan_file.read(nbytes)
def write(self, data):
return self.iochan.send(data)
def _controll(self, event):
self.controlchan.send(event)
return self.controlchan.receive()
def close_write(self):
self._controll(RIO_CLOSE_WRITE)
def kill(self):
self._controll(RIO_KILL)
def wait(self):
return self._controll(RIO_WAIT)
@property
def remoteaddress(self):
return self._controll(RIO_REMOTEADDRESS)
def __repr__(self):
return "<RemoteIO via {}>".format(self.iochan.gateway.id)
class PseudoSpec:
def __init__(self, vars):
self.__dict__.update(vars)
def __getattr__(self, name):
return None
def serve_proxy_io(proxy_channelX):
execmodel = proxy_channelX.gateway.execmodel
log = partial(
proxy_channelX.gateway._trace, "serve_proxy_io:%s" % proxy_channelX.id
)
spec = PseudoSpec(proxy_channelX.receive())
# create sub IO object which we will proxy back to our proxy initiator
sub_io = create_io(spec, execmodel)
control_chan = proxy_channelX.receive()
log("got control chan", control_chan)
# read data from master, forward it to the sub
# XXX writing might block, thus blocking the receiver thread
def forward_to_sub(data):
log("forward data to sub, size %s" % len(data))
sub_io.write(data)
proxy_channelX.setcallback(forward_to_sub)
def controll(data):
if data == RIO_WAIT:
control_chan.send(sub_io.wait())
elif data == RIO_KILL:
control_chan.send(sub_io.kill())
elif data == RIO_REMOTEADDRESS:
control_chan.send(sub_io.remoteaddress)
elif data == RIO_CLOSE_WRITE:
control_chan.send(sub_io.close_write())
control_chan.setcallback(controll)
# write data to the master coming from the sub
forward_to_master_file = proxy_channelX.makefile("w")
# read bootstrap byte from sub, send it on to master
log("reading bootstrap byte from sub", spec.id)
initial = sub_io.read(1)
assert initial == "1".encode("ascii"), initial
log("forwarding bootstrap byte from sub", spec.id)
forward_to_master_file.write(initial)
# enter message forwarding loop
while True:
try:
message = Message.from_io(sub_io)
except EOFError:
log("EOF from sub, terminating proxying loop", spec.id)
break
message.to_io(forward_to_master_file)
# proxy_channelX will be closed from remote_exec's finalization code
if __name__ == "__channelexec__":
serve_proxy_io(channel) # noqa
|
mit
|
lakshayg/tensorflow
|
tensorflow/contrib/opt/python/training/addsign.py
|
55
|
6067
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AddSignOptimizer(optimizer.Optimizer):
"""Optimizer that implements the AddSign update.
See [Bello et al., ICML2017],
[Neural Optimizer Search with RL](https://arxiv.org/abs/1709.07417).
"""
def __init__(self,
learning_rate=0.1,
alpha=1.0,
beta=0.9,
sign_decay_fn=None,
use_locking=False,
name='AddSignOptimizer'):
"""Constructs a new AddSignOptimizer object.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
t <- 0 (Initialize timestep)
```
Update:
```
t <- t + 1
      m_t <- beta * m_{t-1} + (1 - beta) * g
      sign_decay <- sign_decay_fn(t)
      update <- (alpha + sign_decay * sign(g) * sign(m)) * g
variable <- variable - lr_t * update
```
Example for AddSign-ld (AddSign with linear sign decay)
```
decay_steps = 1000
linear_decay_fn = sign_decays.get_linear_decay_fn(decay_steps)
opt = AddSignOptimizer(learning_rate=0.1, sign_decay_fn=linear_decay_fn)
```
Args:
learning_rate: learning_rate used when taking a step.
alpha: alpha used in optimizer.
beta: decay used for computing the moving average m.
sign_decay_fn: decay function applied to the sign(g) sign(m) quantity.
Takes global_step as an argument. See sign_decay.py for some examples.
use_locking: If True, use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "AddSignOptimizer".
"""
super(AddSignOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._alpha = alpha
self._beta = beta
self._sign_decay_fn = sign_decay_fn
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._alpha_t = None
self._beta_t = None
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
if self._sign_decay_fn is not None:
self._sign_decay_t = ops.convert_to_tensor(
self._sign_decay_fn(global_step), name='sign_decay')
return super(AddSignOptimizer, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _create_slots(self, var_list):
# Create slots for the first moment.
for v in var_list:
self._zeros_slot(v, 'm', self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name='learning_rate')
self._beta_t = ops.convert_to_tensor(self._beta, name='beta')
self._alpha_t = ops.convert_to_tensor(self._alpha, name='alpha')
if self._sign_decay_fn is None:
self._sign_decay_t = ops.convert_to_tensor(1.0, name='sign_decay')
def _apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.apply_add_sign(
var,
m,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._alpha_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.resource_apply_add_sign(
var.handle,
m.handle,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._alpha_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
m_t = state_ops.assign(
m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
sign_g = ops.IndexedSlices(
math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
sign_gm = ops.IndexedSlices(
array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
sign_g.indices,
dense_shape=sign_g.dense_shape)
sign_decayed = math_ops.cast(
self._sign_decay_t, var.dtype.base_dtype)
multiplier_values = alpha_t + sign_decayed * sign_gm.values
multiplier = ops.IndexedSlices(
multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
final_update = ops.IndexedSlices(
lr_t * multiplier.values * grad.values,
multiplier.indices,
dense_shape=multiplier.dense_shape)
var_update = state_ops.scatter_sub(
var,
final_update.indices,
final_update.values,
use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t])
|
apache-2.0
|
melviso/beatle
|
beatle/app/resources/_file.py
|
2
|
3118
|
# -*- coding: utf-8 -*-
_file = [
"24 24 114 2",
" c None",
". c #DCDCDC",
"+ c #D5D5D5",
"@ c #D7D7D7",
"# c #D2D2D2",
"$ c #D3D2D3",
"% c #D4D4D5",
"& c #D0D0D0",
"* c #C9C9C9",
"= c #C3C3C3",
"- c #BABABA",
"; c #F3F3F3",
"> c #E3E3E3",
", c #E1E1E1",
"' c #E0E0E0",
") c #E2E2E2",
"! c #E1E0E1",
"~ c #DDDEDD",
"{ c #DDDDDD",
"] c #D4D4D4",
"^ c #B8B8B8",
"/ c #EBEAEB",
"( c #DBDADB",
"_ c #D9D9D9",
": c #E6E6E6",
"< c #DFDFDF",
"[ c #DEDDDE",
"} c #DBDBDB",
"| c #B6B6B6",
"1 c #C7C7C7",
"2 c #F1F1F1",
"3 c #EAEAEA",
"4 c #DEDEDE",
"5 c #D3D3D3",
"6 c #D5D4D5",
"7 c #D8D8D8",
"8 c #D6D6D6",
"9 c #D1D1D1",
"0 c #BFBFBF",
"a c #E9E9E9",
"b c #E7E7E7",
"c c #E4E4E4",
"d c #E2E1E2",
"e c #B7B7B7",
"f c #F6F6F6",
"g c #EEEEEE",
"h c #CDCDCD",
"i c #B0B0B0",
"j c #8A8A8A",
"k c #909090",
"l c #A0A0A0",
"m c #ADADAD",
"n c #F2F2F2",
"o c #C5C5C5",
"p c #C6C6C6",
"q c #C5C6C5",
"r c #B5B5B5",
"s c #B4B4B4",
"t c #AFAFAF",
"u c #D9D8D9",
"v c #DADBDA",
"w c #979797",
"x c #F5F5F5",
"y c #ECECEC",
"z c #DADADA",
"A c #E9E9EA",
"B c #A6A6A5",
"C c #F4F4F4",
"D c #CACACA",
"E c #E8E8E9",
"F c #A2A2A1",
"G c #F7F7F7",
"H c #E4E3E3",
"I c #A8A8A8",
"J c #FFFFFF",
"K c #CFCFCF",
"L c #E8E8E8",
"M c #B1B1B1",
"N c #F8F8F8",
"O c #E7E7E8",
"P c #AEAEAD",
"Q c #FCFCFC",
"R c #C8C8C8",
"S c #B8B8B7",
"T c #FAFAFA",
"U c #EDEDED",
"V c #B3B3B3",
"W c #FDFDFD",
"X c #E5E5E5",
"Y c #E6E6E5",
"Z c #F5F4F5",
"` c #CBCBCB",
" . c #FCFAFA",
".. c #EBF1EF",
"+. c #E7E6E7",
"@. c #EFF4F3",
"#. c #E1E0E0",
"$. c #E3E4E3",
"%. c #F9FAFA",
"&. c #F3F2F2",
"*. c #EBEBEB",
"=. c #E5E5E4",
"-. c #D9DADA",
";. c #D3D6D5",
">. c #D0D1D1",
",. c #CECECE",
"'. c #EBEBEA",
"). c #B9B9B8",
"!. c #FEFEFE",
"~. c #EFEFEF",
"{. c #FBFBFB",
"]. c #EBEBEC",
"^. c #BDBDBD",
"/. c #C1C1C1",
" . + @ # $ % & * = - ",
" ; > , ' ) > ! ~ { ] = ^ ",
" / ( _ @ > : < [ } _ = | 1 ",
" 2 3 4 { 5 ' 6 7 8 ] 9 ^ ; 0 ",
" a b ' c { : d , } _ 8 e f g h ",
" f > } . . . . . . . { i j k l m ",
" n c o p o o o q p p p 1 r s t e ",
" n 3 ' c { : [ { . } . . . u v 9 w ",
" x y z } } } } . . . { > 4 , c A B ",
" C y ) 7 9 5 & h p D # p p p 4 E F ",
" G y < . ' < ' ' 4 < , _ 7 8 H E I ",
" J 2 c D b 8 # K D # D 4 4 D L E M ",
" N ; < { ' c L { < < ' . 4 < ' O P ",
" Q C 3 z K R D 5 } D 7 } D 9 ' L S ",
" T U _ 8 _ ' [ 4 z z { 4 4 4 ' O V ",
" W n b 4 ) , { c , 4 { X ' ) Y L - ",
" N Z < _ # 5 & h p * # D ` D 4 O s ",
" ...> . ' < ' ' 4 < ) z z _ +.O S ",
" .@.#.7 5 8 # K D # D ` ` D $.O S ",
" %.&.*.) ' c L . < < ' { 4 ' =.O V ",
" J n -.;.>.,.K 5 c * D D * @ '.O ). ",
" !.G n a ~.U y C x N {.T T x N ].^. ",
" p M ^ e i V | r e M M M ^ e t /. ",
" "]
|
gpl-2.0
|
ObsidianBlk/GemRB--Unofficial-
|
gemrb/GUIScripts/iwd2/Sound.py
|
5
|
4166
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#sound options
import GemRB
from GUIDefines import *
SoundWindow = 0
TextAreaControl = 0
def OnLoad():
global SoundWindow, TextAreaControl
GemRB.LoadWindowPack("GUIOPT", 800, 600)
SoundWindow = GemRB.LoadWindow(7)
SoundWindow.SetFrame( )
TextAreaControl = SoundWindow.GetControl(14)
AmbientButton = SoundWindow.GetControl(16)
AmbientSlider = SoundWindow.GetControl(1)
SoundEffectsButton = SoundWindow.GetControl(17)
SoundEffectsSlider = SoundWindow.GetControl(2)
DialogueButton = SoundWindow.GetControl(18)
DialogueSlider = SoundWindow.GetControl(3)
MusicButton = SoundWindow.GetControl(19)
MusicSlider = SoundWindow.GetControl(4)
MoviesButton = SoundWindow.GetControl(20)
MoviesSlider = SoundWindow.GetControl(22)
EnvironmentalButton = SoundWindow.GetControl(28)
EnvironmentalButtonB = SoundWindow.GetControl(26)
CharacterSoundButton = SoundWindow.GetControl(13)
OkButton = SoundWindow.GetControl(24)
CancelButton = SoundWindow.GetControl(25)
TextAreaControl.SetText(18040)
CharacterSoundButton.SetText(17778)
OkButton.SetText(11973)
OkButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton.SetText(13727)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
AmbientButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, AmbientPress)
AmbientSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, AmbientPress)
AmbientSlider.SetVarAssoc("Volume Ambients",10)
SoundEffectsButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, SoundEffectsPress)
SoundEffectsSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, SoundEffectsPress)
SoundEffectsSlider.SetVarAssoc("Volume SFX",10)
DialogueButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DialoguePress)
DialogueSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, DialoguePress)
DialogueSlider.SetVarAssoc("Volume Voices",10)
MusicButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, MusicPress)
MusicSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MusicPress)
MusicSlider.SetVarAssoc("Volume Music",10)
MoviesButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, MoviesPress)
MoviesSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MoviesPress)
MoviesSlider.SetVarAssoc("Volume Movie",10)
EnvironmentalButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, EnvironmentalPress)
EnvironmentalButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, EnvironmentalPress)
EnvironmentalButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX,OP_OR)
EnvironmentalButtonB.SetVarAssoc("Environmental Audio",1)
EnvironmentalButtonB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
CharacterSoundButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CharacterSoundPress)
OkButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OkPress)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
SoundWindow.SetVisible(WINDOW_VISIBLE)
return
def AmbientPress():
TextAreaControl.SetText(18008)
GemRB.UpdateAmbientsVolume ()
return
def SoundEffectsPress():
TextAreaControl.SetText(18009)
return
def DialoguePress():
TextAreaControl.SetText(18010)
return
def MusicPress():
TextAreaControl.SetText(18011)
GemRB.UpdateMusicVolume ()
return
def MoviesPress():
TextAreaControl.SetText(18012)
return
def EnvironmentalPress():
TextAreaControl.SetText(18022)
return
def CharacterSoundPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("CharSound")
return
def OkPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("Options")
return
def CancelPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("Options")
return
|
gpl-2.0
|
18padx08/PPTex
|
PPTexEnv_x86_64/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.py
|
11
|
7922
|
"""Test functions for the sparse.linalg.interface module
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import TestCase, assert_, assert_equal, \
assert_raises
import numpy as np
import scipy.sparse as sparse
from itertools import product
from scipy.sparse.linalg import interface
class TestLinearOperator(TestCase):
def setUp(self):
self.A = np.array([[1,2,3],
[4,5,6]])
self.B = np.array([[1,2],
[3,4],
[5,6]])
self.C = np.array([[1,2],
[3,4]])
def test_matvec(self):
def get_matvecs(A):
return [{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
'rmatvec': lambda x: np.dot(A.T.conj(),
x).reshape(A.shape[1])
},
{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x),
'rmatvec': lambda x: np.dot(A.T.conj(), x),
'matmat': lambda x: np.dot(A, x)
}]
for matvecs in get_matvecs(self.A):
A = interface.LinearOperator(**matvecs)
assert_(A.args == ())
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal((2*A)*[1,1,1], [12,30])
assert_equal((2*A).rmatvec([1,1]), [10, 14, 18])
assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]])
assert_equal((A*2)*[1,1,1], [12,30])
assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
assert_equal((2j*A)*[1,1,1], [12j,30j])
assert_equal((A+A)*[1,1,1], [12, 30])
assert_equal((A+A).rmatvec([1,1]), [10, 14, 18])
assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
assert_equal((-A)*[1,1,1], [-6,-15])
assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
assert_equal((A-A)*[1,1,1], [0,0])
assert_equal((A-A)*[[1],[1],[1]], [[0],[0]])
z = A+A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
z = 2*A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)
assert_(isinstance(A.matvec(np.array([1,2,3])), np.ndarray))
assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(2*A, interface._ScaledLinearOperator))
assert_(isinstance(2j*A, interface._ScaledLinearOperator))
assert_(isinstance(A+A, interface._SumLinearOperator))
assert_(isinstance(-A, interface._ScaledLinearOperator))
assert_(isinstance(A-A, interface._SumLinearOperator))
assert_((2j*A).dtype == np.complex_)
assert_raises(ValueError, A.matvec, np.array([1,2]))
assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))
assert_raises(ValueError, lambda: A*A)
assert_raises(ValueError, lambda: A**2)
for matvecsA, matvecsB in product(get_matvecs(self.A),
get_matvecs(self.B)):
A = interface.LinearOperator(**matvecsA)
B = interface.LinearOperator(**matvecsB)
assert_equal((A*B)*[1,1], [50,113])
assert_equal((A*B)*[[1],[1]], [[50],[113]])
assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])
assert_equal((A*B).rmatvec([1,1]), [71,92])
assert_(isinstance(A*B, interface._ProductLinearOperator))
assert_raises(ValueError, lambda: A+B)
assert_raises(ValueError, lambda: A**2)
z = A*B
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)
for matvecsC in get_matvecs(self.C):
C = interface.LinearOperator(**matvecsC)
assert_equal((C**2)*[1,1], [17,37])
assert_equal((C**2).rmatvec([1,1]), [22,32])
assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])
assert_(isinstance(C**2, interface._PowerLinearOperator))
class TestAsLinearOperator(TestCase):
def setUp(self):
self.cases = []
def make_cases(dtype):
self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype))
class matlike:
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
self.shape = (2,3)
def matvec(self,x):
y = np.array([1*x[0] + 2*x[1] + 3*x[2],
4*x[0] + 5*x[1] + 6*x[2]], dtype=self.dtype)
if len(x.shape) == 2:
y = y.reshape(-1,1)
return y
def rmatvec(self,x):
return np.array([1*x[0] + 4*x[1],
2*x[0] + 5*x[1],
3*x[0] + 6*x[1]], dtype=self.dtype)
self.cases.append(matlike('int'))
make_cases('int32')
make_cases('float32')
make_cases('float64')
def test_basic(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(
A.matmat(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])
if hasattr(M,'dtype'):
assert_equal(A.dtype, M.dtype)
def test_dot(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(
A.dot(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
|
mit
|
wpjesus/codematch
|
ietf/secr/proceedings/forms.py
|
1
|
6833
|
import os
from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.name.models import DocTypeName
from ietf.meeting.models import Meeting, Session
# ---------------------------------------------
# Globals
# ---------------------------------------------
VALID_SLIDE_EXTENSIONS = ('.doc','.docx','.pdf','.ppt','.pptx','.txt','.zip')
VALID_MINUTES_EXTENSIONS = ('.txt','.html','.htm','.pdf')
VALID_AGENDA_EXTENSIONS = ('.txt','.html','.htm')
VALID_BLUESHEET_EXTENSIONS = ('.pdf','.jpg','.jpeg')
#----------------------------------------------------------
# Forms
#----------------------------------------------------------
class AjaxChoiceField(forms.ChoiceField):
'''
Special ChoiceField to use when populating options with Ajax. The submitted value
is not in the initial choices list so we need to override valid_value().
'''
def valid_value(self, value):
return True
class EditSlideForm(forms.ModelForm):
class Meta:
model = Document
fields = ('title',)
class InterimMeetingForm(forms.Form):
date = forms.DateField(help_text="(YYYY-MM-DD Format, please)")
group_acronym_id = forms.CharField(widget=forms.HiddenInput())
def clean(self):
super(InterimMeetingForm, self).clean()
cleaned_data = self.cleaned_data
# need to use get() here, if the date field isn't valid it won't exist
date = cleaned_data.get('date','')
group_acronym_id = cleaned_data["group_acronym_id"]
qs = Meeting.objects.filter(type='interim',date=date,session__group__acronym=group_acronym_id)
if qs:
raise forms.ValidationError('A meeting already exists for this date.')
return cleaned_data
class RecordingForm(forms.Form):
group = forms.CharField(max_length=40)
external_url = forms.URLField(label='Url')
session = AjaxChoiceField(choices=(('','----'),))
def clean_session(self):
'''
Emulate ModelChoiceField functionality
'''
id = self.cleaned_data.get('session')
try:
return Session.objects.get(id=id)
except Session.DoesNotExist:
raise forms.ValidationError('Invalid Session')
def clean_group(self):
acronym = self.cleaned_data.get('group')
try:
return Group.objects.get(acronym=acronym)
except Group.DoesNotExist:
raise forms.ValidationError('Invalid group name')
class RecordingEditForm(forms.ModelForm):
class Meta:
model = Document
fields = ['external_url']
def __init__(self, *args, **kwargs):
super(RecordingEditForm, self).__init__(*args, **kwargs)
self.fields['external_url'].label='Url'
class ReplaceSlideForm(forms.ModelForm):
file = forms.FileField(label='Select File')
class Meta:
model = Document
fields = ('title',)
def clean_file(self):
file = self.cleaned_data.get('file')
ext = os.path.splitext(file.name)[1].lower()
if ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
class UnifiedUploadForm(forms.Form):
acronym = forms.CharField(widget=forms.HiddenInput())
meeting_id = forms.CharField(widget=forms.HiddenInput())
material_type = forms.ModelChoiceField(queryset=DocTypeName.objects.filter(slug__in=('minutes','agenda','slides','bluesheets')),empty_label=None)
slide_name = forms.CharField(label='Name of Presentation',max_length=255,required=False,help_text="For presentations only")
file = forms.FileField(label='Select File',help_text='<div id="id_file_help">Note 1: You can only upload a presentation file in txt, pdf, doc, or ppt/pptx. System will not accept presentation files in any other format.<br><br>Note 2: All uploaded files will be available to the public immediately on the Preliminary Page. However, for the Proceedings, ppt/pptx files will be converted to html format and doc files will be converted to pdf format manually by the Secretariat staff.</div>')
def clean_file(self):
file = self.cleaned_data['file']
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
def clean(self):
super(UnifiedUploadForm, self).clean()
# if an invalid file type is supplied no file attribute will exist
if self.errors:
return self.cleaned_data
cleaned_data = self.cleaned_data
material_type = cleaned_data['material_type']
slide_name = cleaned_data['slide_name']
file = cleaned_data['file']
ext = os.path.splitext(file.name)[1].lower()
if material_type.slug == 'slides' and not slide_name:
            raise forms.ValidationError('ERROR: Name of Presentation cannot be blank')
# only supporting PDFs per Alexa 04-05-2011
#if material_type == 1 and not file_ext[1] == '.pdf':
# raise forms.ValidationError('Presentations must be a PDF file')
# validate file extensions based on material type (slides,agenda,minutes,bluesheets)
# valid extensions per online documentation: meeting-materials.html
# 09-14-11 added ppt, pdf per Alexa
# 04-19-12 txt/html for agenda, +pdf for minutes per Russ
if material_type.slug == 'slides' and ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if material_type.slug == 'agenda' and ext not in VALID_AGENDA_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for agendas: %s' % ','.join(VALID_AGENDA_EXTENSIONS))
if material_type.slug == 'minutes' and ext not in VALID_MINUTES_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for minutes: %s' % ','.join(VALID_MINUTES_EXTENSIONS))
if material_type.slug == 'bluesheets' and ext not in VALID_BLUESHEET_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for bluesheets: %s' % ','.join(VALID_BLUESHEET_EXTENSIONS))
return cleaned_data
|
bsd-3-clause
|
hyperized/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py
|
21
|
10097
|
#!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_iotdevice_info
version_added: "2.9"
short_description: Facts of Azure IoT hub device
description:
- Query, get Azure IoT hub device.
options:
hub:
description:
- Name of IoT Hub.
type: str
required: true
hub_policy_name:
description:
- Policy name of the IoT Hub which will be used to query from IoT hub.
- This policy should have at least 'Registry Read' access.
type: str
required: true
hub_policy_key:
description:
- Key of the I(hub_policy_name).
type: str
required: true
name:
description:
- Name of the IoT hub device identity.
type: str
aliases:
- device_id
module_id:
description:
- Name of the IoT hub device module.
            - Must be used with I(device_id) defined.
type: str
query:
description:
- Query an IoT hub to retrieve information regarding device twins using a SQL-like language.
- "See U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-query-language)."
type: str
top:
description:
            - Used when I(name) is not defined.
- List the top n devices in the query.
type: int
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Get the details of a device
azure_rm_iotdevice_info:
name: Testing
hub: MyIoTHub
hub_policy_name: registryRead
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
- name: Query all device modules in an IoT Hub
azure_rm_iotdevice_info:
query: "SELECT * FROM devices.modules"
hub: MyIoTHub
hub_policy_name: registryRead
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
- name: List all devices in an IoT Hub
azure_rm_iotdevice_info:
hub: MyIoTHub
hub_policy_name: registryRead
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
'''
RETURN = '''
iot_devices:
description:
- IoT Hub device.
returned: always
type: dict
sample: {
"authentication": {
"symmetricKey": {
"primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
},
"type": "sas",
"x509Thumbprint": {
"primaryThumbprint": null,
"secondaryThumbprint": null
}
},
"capabilities": {
"iotEdge": false
},
"changed": true,
"cloudToDeviceMessageCount": 0,
"connectionState": "Disconnected",
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
"deviceId": "Testing",
"etag": "NzA2NjU2ODc=",
"failed": false,
"generationId": "636903014505613307",
"lastActivityTime": "0001-01-01T00:00:00",
"modules": [
{
"authentication": {
"symmetricKey": {
"primaryKey": "XXXXXXXXXXXXXXXXXXX",
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
},
"type": "sas",
"x509Thumbprint": {
"primaryThumbprint": null,
"secondaryThumbprint": null
}
},
"cloudToDeviceMessageCount": 0,
"connectionState": "Disconnected",
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
"deviceId": "testdevice",
"etag": "MjgxOTE5ODE4",
"generationId": "636903840872788074",
"lastActivityTime": "0001-01-01T00:00:00",
"managedBy": null,
"moduleId": "test"
}
],
"properties": {
"desired": {
"$metadata": {
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
"$lastUpdatedVersion": 8,
"period": {
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
"$lastUpdatedVersion": 8
}
},
"$version": 1,
"period": 100
},
"reported": {
"$metadata": {
"$lastUpdated": "2019-04-08T06:24:10.5613307Z"
},
"$version": 1
}
},
"status": "enabled",
"statusReason": null,
"statusUpdatedTime": "0001-01-01T00:00:00",
"tags": {
"location": {
"country": "us",
"city": "Redmond"
},
"sensor": "humidity"
}
}
''' # NOQA
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMIoTDeviceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str', aliases=['device_id']),
module_id=dict(type='str'),
query=dict(type='str'),
hub=dict(type='str', required=True),
hub_policy_name=dict(type='str', required=True),
hub_policy_key=dict(type='str', required=True),
top=dict(type='int')
)
self.results = dict(
changed=False,
iot_devices=[]
)
        self.name = None
        self.module_id = None
        self.query = None
self.hub = None
self.hub_policy_name = None
self.hub_policy_key = None
self.top = None
self._mgmt_client = None
self._base_url = None
self.query_parameters = {
'api-version': '2018-06-30'
}
self.header_parameters = {
'Content-Type': 'application/json; charset=utf-8',
'accept-language': 'en-US'
}
super(AzureRMIoTDeviceFacts, self).__init__(self.module_arg_spec, supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
self._base_url = '{0}.azure-devices.net'.format(self.hub)
config = {
'base_url': self._base_url,
'key': self.hub_policy_key,
'policy': self.hub_policy_name
}
if self.top:
self.query_parameters['top'] = self.top
self._mgmt_client = self.get_data_svc_client(**config)
response = []
if self.module_id:
response = [self.get_device_module()]
elif self.name:
response = [self.get_device()]
elif self.query:
response = self.hub_query()
else:
response = self.list_devices()
self.results['iot_devices'] = response
return self.results
def hub_query(self):
try:
url = '/devices/query'
request = self._mgmt_client.post(url, self.query_parameters)
query = {
'query': self.query
}
response = self._mgmt_client.send(request=request, headers=self.header_parameters, content=query)
if response.status_code not in [200]:
raise CloudError(response)
return json.loads(response.text)
except Exception as exc:
self.fail('Error when running query "{0}" in IoT Hub {1}: {2}'.format(self.query, self.hub, exc.message or str(exc)))
def get_device(self):
try:
url = '/devices/{0}'.format(self.name)
device = self._https_get(url, self.query_parameters, self.header_parameters)
device['modules'] = self.list_device_modules()
return device
except Exception as exc:
self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
def get_device_module(self):
try:
url = '/devices/{0}/modules/{1}'.format(self.name, self.module_id)
return self._https_get(url, self.query_parameters, self.header_parameters)
except Exception as exc:
            self.fail('Error when getting IoT Hub device {0} module {1}: {2}'.format(self.name, self.module_id, exc.message or str(exc)))
def list_device_modules(self):
try:
url = '/devices/{0}/modules'.format(self.name)
return self._https_get(url, self.query_parameters, self.header_parameters)
except Exception as exc:
            self.fail('Error when listing modules of IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
def list_devices(self):
try:
url = '/devices'
return self._https_get(url, self.query_parameters, self.header_parameters)
except Exception as exc:
self.fail('Error when listing IoT Hub devices in {0}: {1}'.format(self.hub, exc.message or str(exc)))
def _https_get(self, url, query_parameters, header_parameters):
request = self._mgmt_client.get(url, query_parameters)
response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
if response.status_code not in [200]:
raise CloudError(response)
return json.loads(response.text)
def main():
AzureRMIoTDeviceFacts()
if __name__ == '__main__':
main()
|
gpl-3.0
|
joeythesaint/yocto-autobuilder
|
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/rockwell.py
|
37
|
11628
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Rockwell Semiconductor Zodiac Serial Protocol
Coded from official protocol specs (Order No. GPS-25, 09/24/1996, Revision 11)
Maintainer: Bob Ippolito
The following Rockwell Zodiac messages are currently understood::
EARTHA\\r\\n (a hack to "turn on" a DeLorme Earthmate)
1000 (Geodesic Position Status Output)
1002 (Channel Summary)
1003 (Visible Satellites)
1011 (Receiver ID)
The following Rockwell Zodiac messages require implementation::
None really, the others aren't quite so useful and require bidirectional communication w/ the device
Other desired features::
    - Compatibility with the DeLorme Tripmate and other devices with this chipset (?)
"""
import struct, operator, math
from twisted.internet import protocol
from twisted.python import log
DEBUG = 1
class ZodiacParseError(ValueError):
pass
class Zodiac(protocol.Protocol):
dispatch = {
# Output Messages (* means they get sent by the receiver by default periodically)
1000: 'fix', # *Geodesic Position Status Output
1001: 'ecef', # ECEF Position Status Output
1002: 'channels', # *Channel Summary
1003: 'satellites', # *Visible Satellites
1005: 'dgps', # Differential GPS Status
1007: 'channelmeas', # Channel Measurement
1011: 'id', # *Receiver ID
1012: 'usersettings', # User-Settings Output
1100: 'testresults', # Built-In Test Results
1102: 'meastimemark', # Measurement Time Mark
1108: 'utctimemark', # UTC Time Mark Pulse Output
1130: 'serial', # Serial Port Communication Parameters In Use
1135: 'eepromupdate', # EEPROM Update
1136: 'eepromstatus', # EEPROM Status
}
# these aren't used for anything yet, just sitting here for reference
messages = {
# Input Messages
'fix': 1200, # Geodesic Position and Velocity Initialization
'udatum': 1210, # User-Defined Datum Definition
'mdatum': 1211, # Map Datum Select
'smask': 1212, # Satellite Elevation Mask Control
'sselect': 1213, # Satellite Candidate Select
'dgpsc': 1214, # Differential GPS Control
'startc': 1216, # Cold Start Control
'svalid': 1217, # Solution Validity Control
'antenna': 1218, # Antenna Type Select
'altinput': 1219, # User-Entered Altitude Input
'appctl': 1220, # Application Platform Control
'navcfg': 1221, # Nav Configuration
'test': 1300, # Perform Built-In Test Command
'restart': 1303, # Restart Command
'serial': 1330, # Serial Port Communications Parameters
'msgctl': 1331, # Message Protocol Control
'dgpsd': 1351, # Raw DGPS RTCM SC-104 Data
}
MAX_LENGTH = 296
allow_earthmate_hack = 1
recvd = ""
def dataReceived(self, recd):
self.recvd = self.recvd + recd
while len(self.recvd) >= 10:
# hack for DeLorme EarthMate
if self.recvd[:8] == 'EARTHA\r\n':
if self.allow_earthmate_hack:
self.allow_earthmate_hack = 0
self.transport.write('EARTHA\r\n')
self.recvd = self.recvd[8:]
continue
if self.recvd[0:2] != '\xFF\x81':
if DEBUG:
raise ZodiacParseError('Invalid Sync %r' % self.recvd)
else:
raise ZodiacParseError
sync, msg_id, length, acknak, checksum = struct.unpack('<HHHHh', self.recvd[:10])
# verify checksum
cksum = -(reduce(operator.add, (sync, msg_id, length, acknak)) & 0xFFFF)
cksum, = struct.unpack('<h', struct.pack('<h', cksum))
if cksum != checksum:
if DEBUG:
raise ZodiacParseError('Invalid Header Checksum %r != %r %r' % (checksum, cksum, self.recvd[:8]))
else:
raise ZodiacParseError
# length was in words, now it's bytes
length = length * 2
# do we need more data ?
neededBytes = 10
if length:
neededBytes += length + 2
if len(self.recvd) < neededBytes:
break
if neededBytes > self.MAX_LENGTH:
raise ZodiacParseError("Invalid Header??")
# empty messages pass empty strings
message = ''
# does this message have data ?
if length:
message, checksum = self.recvd[10:10+length], struct.unpack('<h', self.recvd[10+length:neededBytes])[0]
cksum = 0x10000 - (reduce(operator.add, struct.unpack('<%dH' % (length/2), message)) & 0xFFFF)
cksum, = struct.unpack('<h', struct.pack('<h', cksum))
if cksum != checksum:
if DEBUG:
                        log.msg('msg_id = %r length = %r' % (msg_id, length), debug=True)
raise ZodiacParseError('Invalid Data Checksum %r != %r %r' % (checksum, cksum, message))
else:
raise ZodiacParseError
# discard used buffer, dispatch message
self.recvd = self.recvd[neededBytes:]
self.receivedMessage(msg_id, message, acknak)
def receivedMessage(self, msg_id, message, acknak):
dispatch = self.dispatch.get(msg_id, None)
if not dispatch:
raise ZodiacParseError('Unknown msg_id = %r' % msg_id)
handler = getattr(self, 'handle_%s' % dispatch, None)
decoder = getattr(self, 'decode_%s' % dispatch, None)
if not (handler and decoder):
# missing handler or decoder
#if DEBUG:
# log.msg('MISSING HANDLER/DECODER PAIR FOR: %r' % (dispatch,), debug=True)
return
decoded = decoder(message)
return handler(*decoded)
def decode_fix(self, message):
assert len(message) == 98, "Geodesic Position Status Output should be 55 words total (98 byte message)"
(ticks, msgseq, satseq, navstatus, navtype, nmeasure, polar, gpswk, gpses, gpsns, utcdy, utcmo, utcyr, utchr, utcmn, utcsc, utcns, latitude, longitude, height, geoidalsep, speed, course, magvar, climb, mapdatum, exhposerr, exvposerr, extimeerr, exphvelerr, clkbias, clkbiasdev, clkdrift, clkdriftdev) = struct.unpack('<LhhHHHHHLLHHHHHHLlllhLHhhHLLLHllll', message)
# there's a lot of shit in here..
# I'll just snag the important stuff and spit it out like my NMEA decoder
utc = (utchr * 3600.0) + (utcmn * 60.0) + utcsc + (float(utcns) * 0.000000001)
log.msg('utchr, utcmn, utcsc, utcns = ' + repr((utchr, utcmn, utcsc, utcns)), debug=True)
latitude = float(latitude) * 0.00000180 / math.pi
longitude = float(longitude) * 0.00000180 / math.pi
posfix = not (navstatus & 0x001c)
satellites = nmeasure
hdop = float(exhposerr) * 0.01
altitude = float(height) * 0.01, 'M'
geoid = float(geoidalsep) * 0.01, 'M'
dgps = None
return (
# seconds since 00:00 UTC
utc,
# latitude (degrees)
latitude,
# longitude (degrees)
longitude,
# position fix status (invalid = False, valid = True)
posfix,
# number of satellites [measurements] used for fix 0 <= satellites <= 12
satellites,
# horizontal dilution of precision
hdop,
# (altitude according to WGS-84 ellipsoid, units (always 'M' for meters))
altitude,
# (geoid separation according to WGS-84 ellipsoid, units (always 'M' for meters))
geoid,
            # None, for compatibility w/ NMEA code
dgps,
)
def decode_id(self, message):
assert len(message) == 106, "Receiver ID Message should be 59 words total (106 byte message)"
ticks, msgseq, channels, software_version, software_date, options_list, reserved = struct.unpack('<Lh20s20s20s20s20s', message)
channels, software_version, software_date, options_list = map(lambda s: s.split('\0')[0], (channels, software_version, software_date, options_list))
software_version = float(software_version)
channels = int(channels) # 0-12 .. but ALWAYS 12, so we ignore.
options_list = int(options_list[:4], 16) # only two bitflags, others are reserved
minimize_rom = (options_list & 0x01) > 0
minimize_ram = (options_list & 0x02) > 0
# (version info), (options info)
return ((software_version, software_date), (minimize_rom, minimize_ram))
def decode_channels(self, message):
assert len(message) == 90, "Channel Summary Message should be 51 words total (90 byte message)"
ticks, msgseq, satseq, gpswk, gpsws, gpsns = struct.unpack('<LhhHLL', message[:18])
channels = []
message = message[18:]
for i in range(12):
flags, prn, cno = struct.unpack('<HHH', message[6 * i:6 * (i + 1)])
# measurement used, ephemeris available, measurement valid, dgps corrections available
flags = (flags & 0x01, flags & 0x02, flags & 0x04, flags & 0x08)
channels.append((flags, prn, cno))
# ((flags, satellite PRN, C/No in dbHz)) for 12 channels
# satellite message sequence number
# gps week number, gps seconds in week (??), gps nanoseconds from Epoch
return (tuple(channels),) #, satseq, (gpswk, gpsws, gpsns))
def decode_satellites(self, message):
assert len(message) == 90, "Visible Satellites Message should be 51 words total (90 byte message)"
ticks, msgseq, gdop, pdop, hdop, vdop, tdop, numsatellites = struct.unpack('<LhhhhhhH', message[:18])
gdop, pdop, hdop, vdop, tdop = map(lambda n: float(n) * 0.01, (gdop, pdop, hdop, vdop, tdop))
satellites = []
message = message[18:]
for i in range(numsatellites):
prn, azi, elev = struct.unpack('<Hhh', message[6 * i:6 * (i + 1)])
azi, elev = map(lambda n: (float(n) * 0.0180 / math.pi), (azi, elev))
            satellites.append((prn, azi, elev))
# ((PRN [0, 32], azimuth +=[0.0, 180.0] deg, elevation +-[0.0, 90.0] deg)) satellite info (0-12)
# (geometric, position, horizontal, vertical, time) dilution of precision
return (tuple(satellites), (gdop, pdop, hdop, vdop, tdop))
def decode_dgps(self, message):
assert len(message) == 38, "Differential GPS Status Message should be 25 words total (38 byte message)"
raise NotImplementedError
def decode_ecef(self, message):
assert len(message) == 96, "ECEF Position Status Output Message should be 54 words total (96 byte message)"
raise NotImplementedError
def decode_channelmeas(self, message):
assert len(message) == 296, "Channel Measurement Message should be 154 words total (296 byte message)"
raise NotImplementedError
def decode_usersettings(self, message):
assert len(message) == 32, "User-Settings Output Message should be 22 words total (32 byte message)"
raise NotImplementedError
def decode_testresults(self, message):
assert len(message) == 28, "Built-In Test Results Message should be 20 words total (28 byte message)"
raise NotImplementedError
def decode_meastimemark(self, message):
assert len(message) == 494, "Measurement Time Mark Message should be 253 words total (494 byte message)"
raise NotImplementedError
def decode_utctimemark(self, message):
assert len(message) == 28, "UTC Time Mark Pulse Output Message should be 20 words total (28 byte message)"
raise NotImplementedError
def decode_serial(self, message):
        assert len(message) == 30, "Serial Port Communication Parameters In Use Message should be 21 words total (30 byte message)"
raise NotImplementedError
def decode_eepromupdate(self, message):
assert len(message) == 8, "EEPROM Update Message should be 10 words total (8 byte message)"
raise NotImplementedError
def decode_eepromstatus(self, message):
assert len(message) == 24, "EEPROM Status Message should be 18 words total (24 byte message)"
raise NotImplementedError
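# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal
# example, assuming pyserial is installed and a Zodiac unit is attached at
# the placeholder device path '/dev/ttyS0'; handle_fix() receives exactly
# the tuple built by decode_fix() above.
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.internet.serialport import SerialPort
    class PrintingZodiac(Zodiac):
        def handle_fix(self, utc, latitude, longitude, posfix, satellites,
                       hdop, altitude, geoid, dgps):
            # log each geodesic position fix as it is decoded
            log.msg('fix: utc=%r lat=%r lon=%r valid=%r sats=%r' % (
                utc, latitude, longitude, posfix, satellites))
    SerialPort(PrintingZodiac(), '/dev/ttyS0', reactor, baudrate=9600)
    reactor.run()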
|
gpl-2.0
|
ioffer30/google-blog-converters-appengine
|
src/blogger2movabletype/b2mt.py
|
30
|
6266
|
#!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import logging
import re
import sys
import time
from xml.sax.saxutils import unescape
import gdata
from gdata import atom
import iso8601
import movabletype
__author__ = 'JJ Lueck (jlueck@gmail.com)'
###########################
# Constants
###########################
BLOGGER_NS = 'http://www.blogger.com/atom/ns#'
KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'
###########################
# Translation class
###########################
class Blogger2MovableType(object):
"""Performs the translation of a Blogger export document to WordPress WXR."""
def __init__(self, doc):
"""Constructs a translator for a Blogger export file.
Args:
      doc: The Blogger export file as a string
"""
# Ensure UTF8 chars get through correctly by ensuring we have a
# compliant UTF8 input doc.
self.doc = doc.decode('utf-8', 'replace').encode('utf-8')
# Read the incoming document as a GData Atom feed.
self.feed = atom.FeedFromString(self.doc)
self.next_id = 1
def Translate(self):
"""Performs the actual translation to WordPress WXR export format.
Returns:
A WordPress WXR export document as a string, or None on error.
"""
    # Create the top-level export document.
# Keep a map of posts so that we can write out one post with all of
# its comments
posts_map = {}
mt = movabletype.MovableTypeExport()
for entry in self.feed.entry:
# Grab the information about the entry kind
entry_kind = ""
for category in entry.category:
if category.scheme == KIND_SCHEME:
entry_kind = category.term
if entry_kind.endswith("#comment"):
# This entry will be a comment, grab the post that it goes to
in_reply_to = entry.FindExtensions('in-reply-to')
post_item = None
# Check to see that the comment has a corresponding post entry
if in_reply_to:
post_id = self._ParsePostId(in_reply_to[0].attributes['ref'])
post_item = posts_map.get(post_id, None)
        # Found the post for the comment, add the comment to it
if post_item:
# The author email may not be included in the file
author_email = ''
if entry.author[0].email:
author_email = entry.author[0].email.text
          # Same for the author's url
author_url = ''
if entry.author[0].uri:
author_url = entry.author[0].uri.text
comment = movabletype.MovableTypeComment()
comment.author = entry.author[0].name.text
comment.email = author_email
comment.url = author_url
comment.date = self._ConvertDate(entry.published.text)
comment.body = self._ConvertContent(entry.content.text)
post_item.comments.append(comment)
elif entry_kind.endswith("#post"):
# This entry will be a post
post_item = self._ConvertPostEntry(entry)
posts_map[self._ParsePostId(entry.id.text)] = post_item
mt.posts.append(post_item)
return mt.ToString()
def _ConvertPostEntry(self, entry):
"""Converts the contents of an Atom entry into a WXR post Item element."""
# A post may have an empty title, in which case the text element is None.
title = ''
if entry.title.text:
title = entry.title.text
# Check here to see if the entry points to a draft or regular post
status = 'Publish'
if entry.control and entry.control.draft:
status = 'Draft'
# Create the actual item element
post_item = movabletype.MovableTypePost()
post_item.title = title
    post_item.date = self._ConvertDate(entry.published.text)
    post_item.author = entry.author[0].name.text
    post_item.body = self._ConvertContent(entry.content.text)
post_item.status = status
    # Convert the categories which specify labels into MovableType categories
for category in entry.category:
if category.scheme == BLOGGER_NS:
post_item.categories.append(category.term)
# How does one specify the primary category for a post
post_item.primary_category = category.term
return post_item
def _ConvertContent(self, text):
"""Converts the text into plain-text
If no text is provided, the empty string is returned.
"""
if not text:
return ''
# First unescape all XML tags as they'll be escaped by the XML emitter
content = unescape(text)
return str(content)
def _ConvertDate(self, date):
"""Translates to a wordpress date element's time/date format."""
date_tuple = iso8601.parse_date(date)
return date_tuple.strftime('%m/%d/%Y %I:%M:%S %p')
def _GetNextId(self):
"""Returns the next identifier to use in the export document as a string."""
    next_id = self.next_id
self.next_id += 1
return str(next_id)
def _ParsePostId(self, text):
"""Extracts the post identifier from a Blogger entry ID."""
    matcher = re.compile(r'post-(\d+)')
matches = matcher.search(text)
return matches.group(1)
if __name__ == '__main__':
if len(sys.argv) <= 1:
print 'Usage: %s <blogger_export_file>' % os.path.basename(sys.argv[0])
print
print ' Outputs the converted MovableType export file to standard out.'
sys.exit(-1)
wp_xml_file = open(sys.argv[1])
wp_xml_doc = wp_xml_file.read()
translator = Blogger2MovableType(wp_xml_doc)
print translator.Translate()
wp_xml_file.close()
|
apache-2.0
|
hyperspy/hyperspyUI
|
hyperspyui/plugincreator.py
|
3
|
3831
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Sat Feb 21 12:04:06 2015
@author: Vidar Tonaas Fauske
"""
"""
Utility to make it easier to create plugins directly through the UI.
"""
import os
import string
import hyperspyui.plugins.plugin
header = """from hyperspyui.plugins.plugin import Plugin
import numpy as np
import hyperspy.api as hs
class {0}(Plugin):
name = "{1}"
def create_actions(self):"""
action_noicon = """
self.add_action(self.name + '.default', self.name, self.default,
tip="")
"""
action_icon = """
self.add_action(self.name + '.default', self.name, self.default,
icon="{0}",
tip="")
"""
menu_def = """
def create_menu(self):
self.add_menuitem('{0}', self.ui.actions[self.name + '.default'])
"""
toolbar_def = """
def create_toolbars(self):
self.add_toolbar_button('{0}', self.ui.actions[self.name + '.default'])
"""
default = """
def default(self):
{0}
"""
def indent(lines, amount, ch=' '):
padding = amount * ch
return padding + ('\n' + padding).join(lines.split('\n'))
def suggest_plugin_filename(name):
filename = name.lower() + '.py'
dirname = os.path.dirname(hyperspyui.plugins.plugin.__file__)
path = dirname + os.path.sep + filename
return path
def create_plugin_code(code, name, category=None, menu=False, toolbar=False,
icon=None):
"""Create a plugin with an action that will execute 'code' when triggered.
If 'menu' and/or 'toolbar' is True, the corresponding items will be added
for the action.
"""
if category is None:
category = name
safe_name = string.capwords(name).replace(" ", "")
plugin_code = header.format(safe_name, name)
if icon is None:
plugin_code += action_noicon.format()
else:
plugin_code += action_icon.format(icon)
if menu:
plugin_code += menu_def.format(category)
if toolbar:
plugin_code += toolbar_def.format(category)
# Indent code by two levels
code = indent(code, 2 * 4)
plugin_code += default.format(code)
try:
import autopep8
plugin_code = autopep8.fix_code(plugin_code,
options=autopep8.parse_args(
['--aggressive', '--aggressive', '']))
except ImportError:
pass
return plugin_code
def create_plugin_file(code, name, category=None, menu=False, toolbar=False,
filename=None):
"""Create a plugin with an action that will execute 'code' when triggered.
If 'menu' and/or 'toolbar' is True, the corresponding items will be added
for the action.
"""
if filename is None:
path = suggest_plugin_filename(name)
elif os.path.isabs(filename):
path = filename
else:
dirname = os.path.dirname(hyperspyui.plugins.plugin.__file__)
path = dirname + os.path.sep + filename
with open(path, 'w') as f:
f.write(create_plugin_code(code, name, category, menu, toolbar))
return path
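# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The plugin
# name, category and code body below are placeholder values; the generated
# plugin source is printed instead of being written to the plugins folder.
if __name__ == '__main__':
    example_body = 'print("Hello from a generated plugin")'
    print(create_plugin_code(example_body, "Example Plugin",
                             category="Examples", menu=True, toolbar=False))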
|
gpl-3.0
|
75651/kbengine_cloud
|
kbe/src/lib/python/Lib/pydoc_data/topics.py
|
63
|
386447
|
# -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Sun Oct 5 19:01:41 2014
topics = {'assert': '\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. 
It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n',
'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. 
In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n',
'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier. This\nproduction can be customized by overriding the "__getattr__()" method.\nIf this attribute is not available, the exception "AttributeError" is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. Multiple evaluations of the same attribute\nreference may yield different objects.\n',
'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)". Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both be sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n',
'bitwise': '\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n',
'bltin-code-objects': '\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "__code__" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec()" or "eval()" built-in functions.\n\nSee *The standard type hierarchy* for more information.\n',
'bltin-ellipsis-object': '\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the\n"Ellipsis" singleton.\n\nIt is written as "Ellipsis" or "...".\n',
'bltin-null-object': '\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name). "type(None)()" produces the\nsame singleton.\n\nIt is written as "None".\n',
'bltin-type-objects': '\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<class \'int\'>".\n',
'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a "__bool__()" method.\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to create a\nnew value, it returns a boolean value regardless of the type of its\nargument (for example, "not \'foo\'" produces "False" rather than "\'\'".)\n',
'break': '\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n',
'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n',
'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. 
In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n',
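# Editor's note: illustrative sketch of the call semantics described in the
# 'calls' topic above -- keyword binding plus *iterable and **mapping
# unpacking (added by the editor, not part of the generated data):
#
#     def f(a, b, *rest, **extra):
#         return a, b, rest, extra
#
#     assert f(1, b=2) == (1, 2, (), {})
#     assert f(1, *(2, 3, 4)) == (1, 2, (3, 4), {})
#     assert f(1, 2, **{"c": 3}) == (1, 2, (), {"c": 3})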
'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
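# Editor's note: illustrative sketch of the class-attribute vs. instance-
# attribute behaviour noted in the programmer's note of the 'class' topic
# above (added by the editor, not part of the generated data):
#
#     class C:
#         default = 10               # class attribute, shared by instances
#
#     a, b = C(), C()
#     a.default = 99                 # instance attribute hides the class one
#     assert (a.default, b.default, C.default) == (99, 10, 10)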
'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n',
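# Editor's note: illustrative sketch of the chained-comparison rule and the
# membership tests described in the 'comparisons' topic above (added by the
# editor, not part of the generated data):
#
#     x, y, z = 1, 2, 3
#     assert (x < y <= z) == (x < y and y <= z)
#
#     assert 2 in [1, 2, 3]          # sequence membership
#     assert "bc" in "abc"           # substring test for str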
'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nA compound statement consists of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of a suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. 
Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. 
Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
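# Editor's note: illustrative sketch of two behaviours described in the
# 'compound' topic above -- the loop "else" clause and the mutable-default-
# value pitfall (added by the editor, not part of the generated data):
#
#     def contains(seq, target):
#         for item in seq:
#             if item == target:
#                 break              # skips the else clause
#         else:                      # runs only when the loop was not broken
#             return False
#         return True
#
#     def append_to(value, bucket=None):
#         if bucket is None:         # avoid a shared mutable default value
#             bucket = []
#         bucket.append(value)
#         return bucket
#
#     assert contains([1, 2], 2) and not contains([1, 2], 3)
#     assert append_to(1) == [1] and append_to(2) == [2]    # no shared state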
'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n',
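# Editor's note: illustrative sketch of the __enter__/__exit__ protocol from
# the 'context-managers' topic above (added by the editor, not part of the
# generated data):
#
#     class suppress_zero_division:
#         def __enter__(self):
#             return self
#         def __exit__(self, exc_type, exc_value, traceback):
#             # returning a true value suppresses the exception, as described
#             return exc_type is ZeroDivisionError
#
#     with suppress_zero_division():
#         1 / 0
#     # execution continues here; the ZeroDivisionError was suppressed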
'continue': '\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n',
'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works as follows:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string as a\nleft argument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n',
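# Editor's note: illustrative sketch of the arithmetic conversions described
# in the 'conversions' topic above (added by the editor, not part of the
# generated data):
#
#     assert type(1 + 2.0) is float      # int converted to float
#     assert type(1 + 2j) is complex     # int converted to complex
#     assert type(1 + 2) is int          # both integers, no conversion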
'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". 
Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. 
The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. 
If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable").\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n',
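# Editor's note: illustrative sketch of the __eq__/__hash__ contract described
# in the 'customization' topic above -- equal objects must hash equally (added
# by the editor, not part of the generated data):
#
#     class Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#         def __eq__(self, other):
#             return isinstance(other, Point) and \
#                    (self.x, self.y) == (other.x, other.y)
#         def __hash__(self):
#             return hash((self.x, self.y))
#
#     assert Point(1, 2) == Point(1, 2)
#     assert len({Point(1, 2), Point(1, 2)}) == 1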
'debugger': '\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. 
Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). 
This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. 
Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. 
This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. 
To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n',
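# A minimal sketch of the pdb entry points described in the 'debugger' topic above;
# average() and its sample data are invented for illustration, not part of the topic.
import pdb

def average(values):
    total = 0
    for v in values:
        total += v
    return total / len(values)

if __name__ == '__main__':
    # runcall() enters the debugger as soon as average() is entered; typing
    # "continue" at the (Pdb) prompt lets the call finish and return its value.
    result = pdb.runcall(average, [1, 2, 3])
    print(result)
    # Alternatively, pdb.set_trace() inside average() would hard-code a
    # breakpoint at that exact line.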
'del': '\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n',
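# A small sketch of the "del" semantics described above; the names are made up.
items = [0, 1, 2, 3, 4]
del items[1:3]              # deleting a slice behaves like assigning an empty slice
assert items == [0, 3, 4]

x = 10
del x                       # removes the binding of the name 'x'
try:
    print(x)                # the name is now unbound, so this raises NameError
except NameError as exc:
    print('x is unbound:', exc)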
'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
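# Illustrates the dictionary display rules above: the last datum for a duplicate key
# prevails, and a dict comprehension pairs "key: value" with the usual for clause.
d = {'a': 1, 'a': 2, 'b': 3}
assert d == {'a': 2, 'b': 3}

squares = {n: n * n for n in range(4)}
assert squares == {0: 0, 1: 1, 2: 4, 3: 9}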
'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
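# A sketch of the eval()/exec() namespace behaviour described above; passing a single
# mapping means it is used as both the global and the local namespace.
ns = {'x': 10}
assert eval('x * 2', ns) == 20
exec('y = x + 1', ns)
assert ns['y'] == 11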
'else': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
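# The if/elif/else selection rule stated above: exactly one suite is executed.
def sign(n):
    if n > 0:
        return 'positive'
    elif n < 0:
        return 'negative'
    else:
        return 'zero'

assert [sign(3), sign(-2), sign(0)] == ['positive', 'negative', 'zero']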
'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
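# A minimal illustration of the raise/handle/finally flow described above;
# divide() is an invented helper, not part of the topic text.
def divide(a, b):
    try:
        return a / b
    except ZeroDivisionError:
        return float('inf')
    finally:
        print('cleanup runs whether or not an exception occurred')

assert divide(1, 0) == float('inf')
assert divide(6, 3) == 2.0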
'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. 
This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
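# A sketch of the name-binding rule described above: an assignment anywhere in a
# block makes the name local to the whole block, so reading it before the
# assignment raises UnboundLocalError instead of finding the global.
count = 0

def bump():
    try:
        print(count)        # fails: 'count' is local here because of the binding below
    except UnboundLocalError as exc:
        print('error:', exc)
    count = 1               # this binding is what makes 'count' local to bump()

bump()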
'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n',
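# A quick illustration of the expression-list rules above.
t = 1, 2, 3             # a comma-separated expression list yields a tuple
single = (4,)           # the trailing comma is what creates a one-element tuple
not_a_tuple = (4)       # parentheses alone merely group; this is just the integer 4
empty = ()
assert t == (1, 2, 3) and single == (4,) and not_a_tuple == 4 and empty == ()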
'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n',
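# The floating point literal forms listed above; the integer and exponent parts are
# read in base 10, and "-1" is the unary minus operator applied to the literal 1.
values = [3.14, 10., .001, 1e100, 3.14e-10, 0e0]
assert 077e010 == 77e10 == 770000000000.0
assert isinstance(values[0], float)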
'for': '\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n',
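# A sketch of the for/else clause and the "iterate over a copy" hint above;
# the sample data is invented for the example.
a = [3, -1, 4, -2, 5]
for x in a[:]:              # loop over a copy so removals do not skip the next item
    if x < 0:
        a.remove(x)
assert a == [3, 4, 5]

for n in a:
    if n == 99:
        break
else:
    print('99 not found')   # the else suite runs because the loop was never broken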
'formatstrings': '\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s" | "a"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. 
By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. 
This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. 
This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'", but converts "nan" to "NAN" |\n | | and "inf" to "INF". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. 
The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to "\'g\'", except with at least one digit past the |\n | | decimal point and a default precision of 12. This is |\n | | intended to match "str()", except you can add the other |\n | | format modifiers. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... 
def __str__(self):\n ... return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
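# A few format-spec combinations from the mini-language described above.
print('{:*^12}'.format('mid'))              # fill '*', centred, width 12
print('{:+08.2f}'.format(3.14159))          # sign-aware zero padding, precision 2
print('{:#x} / {:,}'.format(255, 10 ** 6))  # alternate hex form / thousands separator
print('{0:{width}b}'.format(5, width=8))    # a nested replacement field supplies the width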
'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. 
Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n',
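# Illustrates the mutable-default pitfall and the annotations described above;
# append_item() is an invented example, not from the topic text.
def append_item(item, bucket=None) -> list:
    if bucket is None:      # recommended pattern instead of a mutable default bucket=[]
        bucket = []
    bucket.append(item)
    return bucket

assert append_item(1) == [1]
assert append_item(2) == [2]            # a fresh list on every call
print(append_item.__annotations__)      # {'return': <class 'list'>}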
'global': '\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the two restrictions, but programs should not abuse this\nfreedom, as future implementations may enforce them or silently change\nthe meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call. The same applies to the "eval()"\nand "compile()" functions.\n',
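# A small sketch of the "global" statement described above.
counter = 0

def increment():
    global counter          # without this, 'counter += 1' would raise UnboundLocalError
    counter += 1

increment()
increment()
assert counter == 2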
'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property>\n id_continue ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property>\n xid_start ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*">\n xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*">\n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). 
Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
'if': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
'import': '\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause, loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is stored in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. 
The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. The wild\ncard form of import --- "from module import *" --- is only allowed at\nthe module level. Attempting to use it in class or function\ndefinitions will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine dynamically the modules to be loaded.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python where the feature\nbecomes standard.\n\nThe future statement is intended to ease migration to future versions\nof Python that introduce incompatible changes to the language. It\nallows use of the new features on a per-module basis before the\nrelease in which the feature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n',
'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n',
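# A short, hedged illustration of the chaining and membership rules described in the
# comparisons entry above; the values are arbitrary.
#
#     x, y, z = 1, 2, 3
#     assert (x < y <= z) == (x < y and y <= z)   # chained form evaluates y only once
#     assert 2 in {1, 2, 3}                       # membership test on a set
#     assert "bc" in "abc"                        # for strings, "in" is a substring test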
'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n',
'lambda': '\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) are used to create\nanonymous functions. The expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def <lambda>(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n',
'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n',
'naming': '\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. 
Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
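# A minimal sketch of the "used before bound" rule described in the naming entry
# above; the names are invented.
#
#     x = 10
#     def f():
#         print(x)     # raises UnboundLocalError: the assignment below makes x local to f
#         x = 20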
'nonlocal': '\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope excluding\nglobals. This is important because the default behavior for binding is\nto search the local namespace first. The statement allows\nencapsulated code to rebind variables outside of the local scope\nbesides the global (module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n The specification for the "nonlocal" statement.\n',
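# A minimal sketch of "nonlocal" rebinding a variable in an enclosing function's
# scope, as described above; the names "make_counter" and "tally" are invented.
#
#     def make_counter():
#         tally = 0
#         def count():
#             nonlocal tally      # rebind make_counter's "tally", not a new local
#             tally += 1
#             return tally
#         return count
#
#     c = make_counter()
#     assert (c(), c()) == (1, 2)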
'numbers': '\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n',
'numeric-types': '\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). 
If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n',
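# A compact, hedged sketch of the binary / reflected operator protocol described
# above; the class name "Metres" is invented and only "+" is implemented.
#
#     class Metres:
#         def __init__(self, value):
#             self.value = value
#         def __add__(self, other):
#             if isinstance(other, (int, float)):
#                 return Metres(self.value + other)
#             return NotImplemented            # let Python try other.__radd__(self)
#         def __radd__(self, other):
#             return self.__add__(other)       # reflected form handles "other + self"
#
#     assert (Metres(2) + 3).value == 5
#     assert (3 + Metres(2)).value == 5        # int.__add__ fails, Metres.__radd__ runs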
'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (so\nyou should always close files explicitly).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. 
Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n',
'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. 
For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] While comparisons between strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n',
'pass': '\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n',
'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n',
'raise': '\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n',
'return': '\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n',
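# A hedged illustration of the generator behaviour described above: "return value"
# inside a generator becomes StopIteration.value. The generator name is invented.
#
#     def gen():
#         yield 1
#         return "done"
#
#     g = gen()
#     assert next(g) == 1
#     try:
#         next(g)
#     except StopIteration as exc:
#         assert exc.value == "done"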
'sequence-types': '\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. 
For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n',
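# A small, hedged sketch of the container protocol described above; the class name
# "Squares" is invented and only a few of the recommended methods are shown.
#
#     class Squares:
#         def __init__(self, n):
#             self.n = n
#         def __len__(self):
#             return self.n
#         def __getitem__(self, index):
#             if not 0 <= index < self.n:
#                 raise IndexError(index)      # lets "for" loops and "in" terminate
#             return index * index
#
#     s = Squares(4)
#     assert len(s) == 4
#     assert list(s) == [0, 1, 4, 9]           # iteration falls back to __getitem__
#     assert 9 in s                            # membership falls back to iteration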
'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n',
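A minimal check of the shift identities stated in the 'shifting' entry, assuming ordinary Python ints; the particular values are arbitrary.

x, n = 42, 3
assert x << n == x * pow(2, n)        # left shift multiplies by 2**n
assert x >> n == x // pow(2, n)       # right shift floor-divides by 2**n
assert (-7) >> 1 == (-7) // 2 == -4   # floor division, so negatives round toward -infinity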
'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same "__getitem__()"\nmethod as normal subscription) with a key that is constructed from the\nslice list, as follows. If the slice list contains at least one\ncomma, the key is a tuple containing the conversion of the slice\nitems; otherwise, the conversion of the lone slice item is the key.\nThe conversion of a slice item that is an expression is that\nexpression. The conversion of a proper slice is a slice object (see\nsection *The standard type hierarchy*) whose "start", "stop" and\n"step" attributes are the values of the expressions given as lower\nbound, upper bound and stride, respectively, substituting "None" for\nmissing expressions.\n',
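One way to see the key construction described in the 'slicings' entry is a class whose __getitem__() simply returns whatever key it receives; the class name "ShowKey" is made up for this illustration.

class ShowKey:
    def __getitem__(self, key):
        return key

s = ShowKey()
print(s[1:2])        # slice(1, 2, None)  -- a lone proper slice becomes a slice object
print(s[1:10:2])     # slice(1, 10, 2)
print(s[1:2, 7:9])   # (slice(1, 2, None), slice(7, 9, None)) -- a comma makes the key a tuple
print(s[3])          # 3                  -- plain subscription, not a slicing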
'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n',
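The special attributes listed in the 'specialattrs' entry can be inspected directly; the throwaway classes below are invented purely for illustration.

class Base(object):
    pass

class Child(Base):
    pass

c = Child()
print(c.__class__ is Child)                  # True
print(Child.__bases__)                       # (<class '__main__.Base'>,)
print(Child.__name__, Child.__qualname__)    # Child Child
print(Child.__mro__)                         # (Child, Base, object)
print(Base.__subclasses__())                 # [<class '__main__.Child'>]
print(c.__dict__)                            # {} -- no instance attributes assigned yet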
'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. 
It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. 
The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. 
using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable").\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. 
If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. 
In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. 
Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. 
If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. 
This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n',
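A compact sketch tying together a few of the special methods covered in the 'specialnames' entry: a consistent __eq__()/__hash__()/__repr__() trio plus an __enter__()/__exit__() pair. The class names are invented for the example, and nothing beyond the standard library is assumed.

import functools

@functools.total_ordering            # derives __le__, __gt__, __ge__ from __eq__ and __lt__
class Version:
    def __init__(self, major, minor):
        self.major, self.minor = major, minor

    def __repr__(self):
        # "official" representation: looks like the constructor call
        return "Version({0}, {1})".format(self.major, self.minor)

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented    # lets Python try the reflected method
        return (self.major, self.minor) == (other.major, other.minor)

    def __lt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return (self.major, self.minor) < (other.major, other.minor)

    def __hash__(self):
        # Hash the same tuple __eq__() compares, so equal objects hash equal
        # and instances stay usable as dict keys and set members.
        return hash((self.major, self.minor))

class suppress_all:
    """Minimal context manager that swallows any exception in its block."""
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        return True                  # a true value suppresses the exception

assert Version(3, 4) == Version(3, 4)
assert Version(3, 4) < Version(3, 10)
assert len({Version(1, 0), Version(1, 0)}) == 1    # hashable, so the set deduplicates
with suppress_all():
    1 / 0                            # ZeroDivisionError is suppressed by __exit__()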
'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Codec Base Classes*. For a\n list of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) 
If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. 
Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). 
If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   For example:\n\n      >>> \'1,2,3\'.split(\',\')\n      [\'1\', \'2\', \'3\']\n      >>> \'1,2,3\'.split(\',\', maxsplit=1)\n      [\'1\', \'2,3\']\n      >>> \'1,2,,3,\'.split(\',\')\n      [\'1\', \'2\', \'\', \'3\', \'\']\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example:\n\n      >>> \'1 2 3\'.split()\n      [\'1\', \'2\', \'3\']\n      >>> \'1 2 3\'.split(maxsplit=1)\n      [\'1\', \'2 3\']\n      >>> \'   1   2   3   \'.split()\n      [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries.  This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example:\n\n      >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n      [\'ab c\', \'\', \'de fg\', \'kl\']\n      >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n      [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line:\n\n      >>> "".splitlines()\n      []\n      >>> "One line\\n".splitlines()\n      [\'One line\']\n\n   For comparison, "split(\'\\n\')" gives:\n\n      >>> \'\'.split(\'\\n\')\n      [\'\']\n      >>> \'Two lines\\n\'.split(\'\\n\')\n      [\'Two lines\', \'\']\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n      >>> \'   spacious   \'.strip()\n      \'spacious\'\n      >>> \'www.example.com\'.strip(\'cmowz.\')\n      \'example\'\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa. Note that it is not necessarily true that\n   "s.swapcase().swapcase() == s".\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   For example:\n\n      >>> \'Hello world\'.title()\n      \'Hello World\'\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters. 
The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n   Return a copy of the string *s* where all characters have been\n   mapped through the *map* which must be a dictionary of Unicode\n   ordinals (integers) to Unicode ordinals, strings or "None".\n   Unmapped characters are left untouched. Characters mapped to "None"\n   are deleted.\n\n   You can use "str.maketrans()" to create a translation map from\n   character-to-character mappings in different formats.\n\n   Note: An even more flexible approach is to create a custom\n     character mapping codec using the "codecs" module (see\n     "encodings.cp1251" for an example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   The uppercasing algorithm used is described in section 3.13 of the\n   Unicode Standard.\n\nstr.zfill(width)\n\n   Return a copy of the string left filled with ASCII "\'0\'" digits to\n   make a string of length *width*. A leading sign prefix ("\'+\'"/"\'-\'")\n   is handled by inserting the padding *after* the sign character\n   rather than before. The original string is returned if *width* is\n   less than or equal to "len(s)".\n\n   For example:\n\n      >>> "42".zfill(5)\n      \'00042\'\n      >>> "-42".zfill(5)\n      \'-0042\'\n',
'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n stringescapeseq ::= "\\" <any source character>\n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n longbyteschar ::= <any ASCII character except "\\">\n bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n been added as a synonym of "\'br\'".\n\n New in version 3.3: Support for the unicode legacy literal\n ("u\'value\'") was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. 
either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n byte with the given value. In a string literal, these escapes\n denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight\n hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n',
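 # Illustrative sketch (assumes a standard CPython 3.x interpreter); it
 # restates the escape-sequence and raw-string rules from the 'strings'
 # topic above:
 #     >>> len('\n'), len(r'\n')    # escape is one character; raw string
 #     (1, 2)                       # keeps the backslash plus 'n'
 #     >>> b'\x41' == b'A'          # hex escape in a bytes literal
 #     True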
'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription\n(lists or dictionaries for example). User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n',
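 # Illustrative sketch (assumes a standard CPython 3.x interpreter) of the
 # subscription rules in the 'subscriptions' topic above: built-in
 # sequences translate negative indices, and a user-defined class becomes
 # subscriptable by defining __getitem__():
 #     >>> 'abc'[-1]
 #     'c'
 #     >>> class Squares:
 #     ...     def __getitem__(self, index):
 #     ...         return index * index
 #     >>> Squares()[4]
 #     16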
'truth': '\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n',
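 # Illustrative sketch (assumes a standard CPython 3.x interpreter) of the
 # truth-value rules in the 'truth' topic above; note that 'or' returns one
 # of its operands rather than a bool:
 #     >>> [bool(x) for x in (None, False, 0, 0.0, '', (), [], {})]
 #     [False, False, False, False, False, False, False, False]
 #     >>> 0 or 'fallback'
 #     'fallback'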
'try': '\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. 
If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. 
Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode code\n points. All the code points in the range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "char"\n type; instead, every code point in the string is represented\n as a string object with length "1". The built-in function\n "ord()" converts a code point from its string form to an\n integer in the range "0 - 10FFFF"; "chr()" converts an\n integer in the range "0 - 10FFFF" to the corresponding length\n "1" string object. "str.encode()" can be used to convert a\n "str" to "bytes" using the given text encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. 
Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). 
It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value | |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | | contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | and "\'return\'" for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. 
Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. 
It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. 
To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". 
See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n',
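 # Illustrative sketch (assumes a standard CPython 3.x interpreter) of
 # slice.indices() as described under "Slice objects" in the 'types' topic
 # above:
 #     >>> slice(1, 10, 2).indices(5)    # stop is clamped to the length
 #     (1, 5, 2)
 #     >>> slice(None, None, -1).indices(5)
 #     (4, -1, -1)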
'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n',
'typesmapping': '\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()", if the\n key *key* is not present, the "d[key]" operation calls that\n method with the key *key* as argument. The "d[key]" operation\n then returns or raises whatever is returned or raised by the\n "__missing__(key)" call if the key is not present. No other\n operations or methods invoke "__missing__()". If "__missing__()"\n is not defined, "KeyError" is raised. 
"__missing__()" must be a\n method; it cannot be an instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See "collections.Counter" for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. 
If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n',
'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n',
'typesmodules': '\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>". If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
'typesseq': '\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. 
While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to a "io.StringIO" instance\n and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. 
Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item 
from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. 
A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. 
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n',
'typesseq-mutable': '\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. 
To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n',
'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n',
'while': '\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n',
'with': '\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n',
'yield': '\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n yield <expr>\n yield from <expr>\n\nare equivalent to the yield expression statements\n\n (yield <expr>)\n (yield from <expr>)\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction. Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'}
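# Minimal sketch (illustrative; not part of the topics data above): the
# "yield" entry describes how using yield in a def makes that def a
# generator function. For example:
#
#   def count_up_to(n):
#       i = 0
#       while i < n:
#           yield i        # equivalent to the expression statement (yield i)
#           i += 1
#
#   list(count_up_to(3))   # -> [0, 1, 2]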
|
lgpl-3.0
|
alaeddine10/ggrc-core
|
src/ggrc/models/relationship_types.py
|
1
|
21783
|
class RelationshipTypes(object):
@classmethod
def types(cls):
types = {}
for k, rt in RELATIONSHIP_TYPES.items():
types[k] = rt.copy()
types[k].update({ 'relationship_type': k })
return types
@classmethod
def get_type(cls, relationship_type_id):
return cls.types().get(relationship_type_id, None)
@classmethod
def valid_relationship_hash(cls, relationship_type, related_model, endpoint):
return dict(
relationship_type=relationship_type,
related_model=related_model,
related_model_endpoint=endpoint)
@classmethod
def valid_relationship(cls, obj_type, name, rel):
if 'symmetric' in rel and rel['symmetric']:
if rel['source_type'] == obj_type and rel['target_type'] == obj_type:
return cls.valid_relationship_hash(name, obj_type, 'both')
else:
if rel['source_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['target_type'], 'destination')
if rel['target_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['source_type'], 'source')
@classmethod
def valid_relationship_helper(cls, obj_type):
return [
cls.valid_relationship(obj_type, name, rel)
for name, rel in cls.types().items()]
@classmethod
def valid_relationships(cls, obj_type):
if not isinstance(obj_type, (str, unicode)):
if not isinstance(obj_type, type):
obj_type = obj_type.__class__
obj_type = obj_type.__name__
return [vr for vr in cls.valid_relationship_helper(obj_type) if vr]
RELATIONSHIP_TYPES = {
'data_asset_has_process': {
'source_type': "DataAsset",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This data asset relies upon the following processes.",
'reverse_description': "This process supports the following data assets."
},
'data_asset_relies_upon_data_asset': {
'source_type': "DataAsset",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following data assets.",
'reverse_description': "This data asset supports the following data assets."
},
'data_asset_relies_upon_facility': {
'source_type': "DataAsset",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following facilities.",
'reverse_description': "This facility supports the following data assets."
},
'data_asset_relies_upon_system': {
'source_type': "DataAsset",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following systems.",
'reverse_description': "This system supports the following data assets."
},
'facility_has_process': {
'source_type': "Facility",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This facility relies upon the following processes.",
'reverse_description': "This process supports the following facilities."
},
'facility_relies_upon_data_asset': {
'source_type': "Facility",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following data assets.",
'reverse_description': "This data asset supports the following facilities."
},
'facility_relies_upon_facility': {
'source_type': "Facility",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following facilities.",
'reverse_description': "This facility supports the following facilities."
},
'facility_relies_upon_system': {
'source_type': "Facility",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following systems.",
'reverse_description': "This system supports the following facilities."
},
'market_has_process': {
'source_type': "Market",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This market relies upon the following processes.",
'reverse_description': "This process supports the following markets."
},
'market_includes_market': {
'source_type': "Market",
'target_type': "Market",
'forward_phrase': "includes",
'reverse_phrase': "is included in",
'forward_description': "This market includes the following markets.",
'reverse_description': "This market is included in the following markets."
},
'market_relies_upon_data_asset': {
'source_type': "Market",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following data assets.",
'reverse_description': "This data asset supports the following markets."
},
'market_relies_upon_facility': {
'source_type': "Market",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following facilities.",
'reverse_description': "This facility supports the following markets."
},
'market_relies_upon_system': {
'source_type': "Market",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following systems.",
'reverse_description': "This system supports the following markets."
},
'org_group_has_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This org group relies upon the following processes.",
'reverse_description': "This process supports the following org groups."
},
'org_group_is_affiliated_with_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This org group is affiliated/collaborates with the following org groups.",
'reverse_description': "This org group is affiliated/collaborates with the following org groups."
},
'org_group_is_responsible_for_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following data assets.",
'reverse_description': "This data asset is overseen by the following org groups."
},
'org_group_is_responsible_for_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following facilities.",
'reverse_description': "This facility is overseen by the following org groups."
},
'org_group_is_responsible_for_market': {
'source_type': "OrgGroup",
'target_type': "Market",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following markets.",
'reverse_description': "This market is overseen by the following org groups."
},
'org_group_is_responsible_for_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following org groups.",
'reverse_description': "This org group is overseen by the following org groups."
},
'org_group_is_responsible_for_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following processes.",
'reverse_description': "This process is overseen by the following org groups."
},
'org_group_is_responsible_for_product': {
'source_type': "OrgGroup",
'target_type': "Product",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following products.",
'reverse_description': "This product is overseen by the following org groups."
},
'org_group_is_responsible_for_project': {
'source_type': "OrgGroup",
'target_type': "Project",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following projects.",
'reverse_description': "This project is overseen by the following org groups."
},
'org_group_is_responsible_for_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following systems.",
'reverse_description': "This system is overseen by the following org groups."
},
'org_group_relies_upon_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following data assets.",
'reverse_description': "This data asset supports the following org groups."
},
'org_group_relies_upon_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following facilities.",
'reverse_description': "This facility supports the following org groups."
},
'org_group_relies_upon_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following org groups.",
'reverse_description': "This org group supports the following org groups."
},
'org_group_relies_upon_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following systems.",
'reverse_description': "This system supports the following org groups."
},
'product_has_process': {
'source_type': "Product",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This product relies upon the following processes.",
'reverse_description': "This process supports the following products."
},
'product_is_affiliated_with_product': {
'source_type': "Product",
'target_type': "Product",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This product is affiliated/collaborates with the following products.",
'reverse_description': "This product is affiliated/collaborates with the following products."
},
'product_is_sold_into_market': {
'source_type': "Product",
'target_type': "Market",
'forward_phrase': "is sold into",
'reverse_phrase': "is a market for",
'forward_description': "This product is sold into the following markets.",
'reverse_description': "This market is a market for the following products."
},
'product_relies_upon_data_asset': {
'source_type': "Product",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following data assets.",
'reverse_description': "This data asset supports the following products."
},
'product_relies_upon_facility': {
'source_type': "Product",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following facilities.",
'reverse_description': "This facility supports the following products."
},
'product_relies_upon_product': {
'source_type': "Product",
'target_type': "Product",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following products.",
'reverse_description': "This product supports the following products."
},
'product_relies_upon_system': {
'source_type': "Product",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following systems.",
'reverse_description': "This system supports the following products."
},
'program_applies_to_data_asset': {
'source_type': "Program",
'target_type': "DataAsset",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following data assets.",
'reverse_description': "This data asset is within scope of the following programs."
},
'program_applies_to_facility': {
'source_type': "Program",
'target_type': "Facility",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following facilities.",
'reverse_description': "This facility is within scope of the following programs."
},
'program_applies_to_market': {
'source_type': "Program",
'target_type': "Market",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following markets.",
'reverse_description': "This market is within scope of the following programs."
},
'program_applies_to_org_group': {
'source_type': "Program",
'target_type': "OrgGroup",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following org groups.",
'reverse_description': "This org group is within scope of the following programs."
},
'program_applies_to_process': {
'source_type': "Program",
'target_type': "Process",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following processes.",
'reverse_description': "This process is within scope of the following programs."
},
'program_applies_to_product': {
'source_type': "Program",
'target_type': "Product",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following products.",
'reverse_description': "This product is within scope of the following programs."
},
'program_applies_to_project': {
'source_type': "Program",
'target_type': "Project",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following projects.",
'reverse_description': "This project is within scope of the following programs."
},
'program_applies_to_system': {
'source_type': "Program",
'target_type': "System",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following systems.",
'reverse_description': "This system is within scope of the following programs."
},
'project_has_process': {
'source_type': "Project",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This project relies upon the following processes.",
'reverse_description': "This process supports the following projects."
},
'project_relies_upon_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following data assets.",
'reverse_description': "This data asset supports the following projects."
},
'project_relies_upon_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following facilities.",
'reverse_description': "This facility supports the following projects."
},
'project_relies_upon_system': {
'source_type': "Project",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following systems.",
'reverse_description': "This system supports the following projects."
},
'project_targets_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following data assets.",
'reverse_description': "This data asset is targeted by the following projects."
},
'project_targets_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following facilities.",
'reverse_description': "This facility is targeted by the following projects."
},
'project_targets_market': {
'source_type': "Project",
'target_type': "Market",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following markets.",
'reverse_description': "This market is targeted by the following projects."
},
'project_targets_org_group': {
'source_type': "Project",
'target_type': "OrgGroup",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following org groups.",
'reverse_description': "This org group is targeted by the following projects."
},
'project_targets_product': {
'source_type': "Project",
'target_type': "Product",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following products.",
'reverse_description': "This product is targeted by the following projects."
},
'risk_is_a_threat_to_data_asset': {
'source_type': "Risk",
'target_type': "DataAsset",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following data assets.",
'reverse_description': "This data asset is vulnerable to the following risks."
},
'risk_is_a_threat_to_facility': {
'source_type': "Risk",
'target_type': "Facility",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following facilities.",
'reverse_description': "This faciliy is vulnerable to the following risks."
},
'risk_is_a_threat_to_market': {
'source_type': "Risk",
'target_type': "Market",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following markets.",
'reverse_description': "This market is vulnerable to the following risks."
},
'risk_is_a_threat_to_org_group': {
'source_type': "Risk",
'target_type': "OrgGroup",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is not a threat to the following org groups.",
'reverse_description': "This org group is vulnerable to the following risks."
},
'risk_is_a_threat_to_process': {
'source_type': "Risk",
'target_type': "Process",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following processes.",
'reverse_description': "This process is vulnerable to the following risks."
},
'risk_is_a_threat_to_product': {
'source_type': "Risk",
'target_type': "Product",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following products.",
'reverse_description': "This product is vulnerable to the following risks."
},
'risk_is_a_threat_to_project': {
'source_type': "Risk",
'target_type': "Project",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following projects.",
'reverse_description': "This project is vulnerable to the following risks."
},
'risk_is_a_threat_to_system': {
'source_type': "Risk",
'target_type': "System",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following systems.",
'reverse_description': "This system is vulnerable to the following risks."
},
}
|
apache-2.0
|
hall-lab/bamkit
|
bamcleanheader.py
|
2
|
4109
|
#!/usr/bin/env python
import argparse, sys
from argparse import RawTextHelpFormatter
import pysam
__author__ = "Author (email@site.com)"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2013-05-09 14:31 $"
# --------------------------------------
# define functions
def get_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
bamcleanheader.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: remove illegal and malformed fields from a BAM file's header")
parser.add_argument('-S', '--is_sam', required=False, action='store_true', help='input is SAM')
# parser.add_argument('-H', required=False, action='store_true', help='output header and quit')
parser.add_argument('input', nargs='*', type=str,
                        help='SAM/BAM file to clean. If \'-\' or absent then defaults to stdin.')
# parse the arguments
args = parser.parse_args()
# if no input, check if part of pipe and if so, read stdin.
if len(args.input) == 0:
if sys.stdin.isatty():
parser.print_help()
exit(1)
else:
args.input = ['-']
# send back the user input
return args
# extract a cleaned header from the original BAM/SAM, keeping only legal fields
def get_clean_header(bam):
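    # keep only header fields allowed by the SAM spec for each record type;
    # e.g. a (hypothetical) line '@RG\tID:lib1\tXX:junk' would become '@RG\tID:lib1'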
clean_header_list = list()
for line in bam.text.split('\n'):
if len(line.rstrip()) == 0:
continue
v = line.rstrip().split('\t')
if v[0] == "@HD":
legal_fields = ['VN',
'SO']
elif v[0] == "@SQ":
legal_fields = ['SN',
'LN',
'AS',
'M5',
'SP',
'UR']
elif v[0] == "@RG":
legal_fields = ['ID',
'CN',
'DS',
'DT',
'FO',
'KS',
'LB',
'PG',
'PI',
'PL',
'PU',
'SM']
elif v[0] == "@PG":
legal_fields = ['ID',
'PN',
'CL',
'PP',
'DS',
'VN']
elif v[0] == "@CO":
# all fields are legal
clean_header_list.append(line.rstrip())
continue
# illegal tag
else:
continue
tag = dict(x.split(':',1) for x in v[1:])
tag_clean = dict()
for field in tag:
if (field in legal_fields
and tag[field].strip() != ""):
tag_clean[field] = tag[field]
        clean_header_list.append(v[0] + '\t'
                                 + '\t'.join(field + ":" + tag_clean[field] for field in tag_clean))
return '\n'.join(clean_header_list)
# print a cleaned header (and optionally the alignments) for the input file
def bam_clean(bam, is_sam, header_only):
if is_sam:
in_bam = pysam.Samfile(bam, 'r', check_sq=False)
else:
in_bam = pysam.Samfile(bam, 'rb', check_sq=False)
# out_bam = pysam.Samfile('-', 'w', template=in_bam)
print get_clean_header(in_bam)
if not header_only:
for al in in_bam:
print al
    # # this code leads to piping errors
# if not header_only:
# for al in in_bam:
# out_bam.write(al)
# out_bam.close()
# close the input file
in_bam.close()
return
# --------------------------------------
# main function
def main():
# parse the command line args
args = get_args()
# clean the header
for mybam in args.input:
bam_clean(mybam, args.is_sam, True)
# initialize the script
if __name__ == '__main__':
try:
sys.exit(main())
except IOError, e:
if e.errno != 32: # ignore SIGPIPE
raise
|
mit
|
yantrabuddhi/atomspace
|
tests/python/old/test_fishgram.py
|
48
|
6101
|
from unittest import TestCase
# uses Python mock, installed with "sudo easy_install mock"
# http://www.voidspace.org.uk/python/mock/
from mock import patch
import os
import tempfile
from opencog.atomspace import AtomSpace, TruthValue, Atom, Handle
from opencog.atomspace import types, is_a, get_type, get_type_name
import opencog.util
import fishgram, tree, adaptors
# run doctests
import doctest
doctest.testmod(fishgram)
# For testing python modules separately from all the other tests,
# go to bin/tests, then run "ctest -v -I 56,56" to run just the Python module
# tests. Use ctest to find out the right number if the test ordering changes
# for some reason.
def add_fishgram_data(atomspace):
a = atomspace
t = types
# load and parse a file, or just add some dummy nodes.
# it shouldn't be huge amounts of data, just enough to demonstrate
# that the code is functionally correct
class FishgramTest(TestCase):
def setUp(self):
self.space = AtomSpace()
self.fishgram = fishgram.Fishgram(self.space)
add_fishgram_data(self.space)
tempfd, self.tempfn = tempfile.mkstemp()
# close the temp file as Logger will want to manually
# open it
os.close(tempfd)
self.log = opencog.util.create_logger(self.tempfn)
def tearDown(self):
pass
def test_bfs(self):
# conj = (
# a.add(t.AtTimeLink, out=[a.add(t.TimeNode, '11210347010'), a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'increased'), a.add(t.ListLink, out=[a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'EnergyDemandGoal'), a.add(t.ListLink, out=[])])])])]),
# a.add(t.AtTimeLink, out=[a.add(t.TimeNode, '11210347000'), a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'actionDone'), a.add(t.ListLink, out=[a.add(t.ExecutionLink, out=[a.add(t.GroundedSchemaNode, 'eat'), a.add(t.ListLink, out=[a.add(t.AccessoryNode, 'id_-54646')])])])])]),
# a.add(t.SequentialAndLink, out=[a.add(t.TimeNode, '11210347000'), a.add(t.TimeNode, '11210347010')])
# )
# conj = tuple(map(tree.tree_from_atom, conj))
#res = tree.find_conj(conj,a.get_atoms_by_type(t.Atom))
pass
def test_implications(self):
i = None #self.fishgram.implications()
self.assertEquals(i,None)
# to patch things from an actual module before instantiating
#@patch('fishgram.Fishgram.run')
@patch('fishgram.Fishgram.closed_bfs_layers')
def test_implications_w_patch(self,mock_bfs):
# you can set this to various values that closed_bfs_layers might sanely
        # return; you can also try insane values to test robustness
mock_bfs.return_value = []
i = self.fishgram.implications()
self.assertEquals(mock_bfs.called,True)
def test_notice_changes(self):
pass
#def test_make_psi_rule(self):
# T = tree.T
# a = self.space.add
# t = types
#
# bark = T('ExecutionLink',
# a(t.GroundedSchemaNode, name='bark'),
# T('ListLink')
# )
#
# goal = T('EvaluationLink',
# a(t.PredicateNode, name = 'EnergyDemandGoal')
# )
#
# action = T('AtTimeLink', 1,
# T('EvaluationLink',
# a(t.PredicateNode, name='actionDone'),
# T('ListLink',
# bark
# )
# )
# )
# seq_and = T('SequentialAndLink', 1, 2) # two TimeNodes
# result = T('AtTimeLink',
# 2,
# T('EvaluationLink',
# a(t.PredicateNode, name='increased'),
# T('ListLink',
# goal
# )
# )
# )
#
# premises = (action, seq_and)
# conclusion = result
#
# print 'premises, conclusion:'
# print premises
# print conclusion
# premises2, conclusion2 = self.fishgram.make_psi_rule(premises, conclusion)
# print premises2, conclusion2
#
# ideal_premises = (bark, )
# ideal_conclusion = goal
#
# self.assertEquals(premises2, ideal_premises)
# self.assertEquals(conclusion2, ideal_conclusion)
#
#def test_lookup_causal_patterns(self):
# T = tree.T
# a = self.space.add
# t = types
#
# bark = T('ExecutionLink',
# a(t.GroundedSchemaNode, name='bark'),
# T('ListLink')
# )
#
# action = T('AtTimeLink', 1,
# T('EvaluationLink',
# a(t.PredicateNode, name='actionDone'),
# T('ListLink',
# bark
# )
# )
# )
# seq_and = T('SequentialAndLink', 1, 2) # two TimeNodes
# increase = T('AtTimeLink',
# 2,
# T('EvaluationLink',
# a(t.PredicateNode, name='increased'),
# T('ListLink',
# T('EvaluationLink',
# a(t.PredicateNode, name = 'EnergyDemandGoal')
# )
# )
# )
# )
#
# # Add the pattern (still with some variables) into the ForestExtractor results, then see if it can be looked up correclty
# ideal_result = (action, seq_and, increase)
# for x in ideal_result:
# # No embedding actually needs to be recorded, as long as the tree itself is there
# self.fishgram.forest.tree_embeddings[x] = []
#
# result = next(self.fishgram.lookup_causal_patterns())
# print result
# assert tree.isomorphic_conjunctions(result, ideal_result)
|
agpl-3.0
|
3liz/qgis-wps4server
|
filters/PyWPS/tests/process_inits.py
|
1
|
3027
|
import os
import sys
pywpsPath = os.path.abspath(os.path.join(
os.path.split(os.path.abspath(__file__))[0], ".."))
sys.path.append(pywpsPath)
import pywps
import pywps.Process
import unittest
import os
from xml.dom import minidom
class RequestGetTestCase(unittest.TestCase):
inputs = None
getcapabilitiesrequest = "service=wps&request=getcapabilities"
getdescribeprocessrequest = "service=wps&request=describeprocess&version=1.0.0&identifier=dummyprocess"
getexecuterequest = "service=wps&request=execute&version=1.0.0&identifier=dummyprocess&datainputs=[input1=20;input2=10]"
wfsurl = "http://www2.dmsolutions.ca/cgi-bin/mswfs_gmap?version=1.0.0&request=getfeature&service=wfs&typename=park"
wpsns = "http://www.opengis.net/wps/1.0.0"
xmldom = None
def setUp(self):
sys.stderr = open("/dev/null", "w")
def testLoadProcessesFromClass(self):
"""Test, if we can load process as classes"""
class newClassProcess(pywps.Process.WPSProcess):
def __init__(self):
pywps.Process.WPSProcess.__init__(
self, identifier="foo", title="bar")
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(self.inputs, [newClassProcess])
xmldom = minidom.parseString(mypywps.response)
self.assertTrue(
len(xmldom.getElementsByTagNameNS(self.wpsns, "Process")) > 0)
def testLoadProcessesAsInstance(self):
"""Test, if we can load process as instances"""
class newClassProcess(pywps.Process.WPSProcess):
def __init__(self):
pywps.Process.WPSProcess.__init__(
self, identifier="foo", title="bar")
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(self.inputs, [newClassProcess()])
xmldom = minidom.parseString(mypywps.response)
self.assertTrue(
len(xmldom.getElementsByTagNameNS(self.wpsns, "Process")) > 0)
def testLoadProcessesFromEnvVar(self):
"""Test, if we can load processes set from PYWPS_PROCESSES
environment variable"""
self._setFromEnv()
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(inputs)
xmldom = minidom.parseString(mypywps.response)
self.assertEquals(len(mypywps.request.processes), 14)
self.assertTrue(mypywps.request.getProcess("dummyprocess"))
def _setFromEnv(self):
os.putenv("PYWPS_PROCESSES", os.path.join(
pywpsPath, "tests", "processes"))
os.environ["PYWPS_PROCESSES"] = os.path.join(
pywpsPath, "tests", "processes")
if __name__ == "__main__":
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(RequestGetTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
gpl-3.0
|
alxgu/ansible
|
lib/ansible/modules/network/fortios/fortios_voip_profile.py
|
17
|
52734
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_voip_profile
short_description: Configure VoIP profiles in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by allowing the
      user to set and modify the voip feature and profile category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
voip_profile:
description:
- Configure VoIP profiles.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Comment.
name:
description:
- Profile name.
required: true
sccp:
description:
- SCCP.
suboptions:
block-mcast:
description:
- Enable/disable block multicast RTP connections.
choices:
- disable
- enable
log-call-summary:
description:
- Enable/disable log summary of SCCP calls.
choices:
- disable
- enable
log-violations:
description:
- Enable/disable logging of SCCP violations.
choices:
- disable
- enable
max-calls:
description:
- Maximum calls per minute per SCCP client (max 65535).
status:
description:
- Enable/disable SCCP.
choices:
- disable
- enable
verify-header:
description:
- Enable/disable verify SCCP header content.
choices:
- disable
- enable
sip:
description:
- SIP.
suboptions:
ack-rate:
description:
- ACK request rate limit (per second, per policy).
block-ack:
description:
- Enable/disable block ACK requests.
choices:
- disable
- enable
block-bye:
description:
- Enable/disable block BYE requests.
choices:
- disable
- enable
block-cancel:
description:
- Enable/disable block CANCEL requests.
choices:
- disable
- enable
block-geo-red-options:
description:
- Enable/disable block OPTIONS requests, but OPTIONS requests still notify for redundancy.
choices:
- disable
- enable
block-info:
description:
- Enable/disable block INFO requests.
choices:
- disable
- enable
block-invite:
description:
- Enable/disable block INVITE requests.
choices:
- disable
- enable
block-long-lines:
description:
- Enable/disable block requests with headers exceeding max-line-length.
choices:
- disable
- enable
block-message:
description:
- Enable/disable block MESSAGE requests.
choices:
- disable
- enable
block-notify:
description:
- Enable/disable block NOTIFY requests.
choices:
- disable
- enable
block-options:
description:
                            - Enable/disable blocking of OPTIONS requests, including OPTIONS used as a redundancy notification message.
choices:
- disable
- enable
block-prack:
description:
- Enable/disable block prack requests.
choices:
- disable
- enable
block-publish:
description:
- Enable/disable block PUBLISH requests.
choices:
- disable
- enable
block-refer:
description:
- Enable/disable block REFER requests.
choices:
- disable
- enable
block-register:
description:
- Enable/disable block REGISTER requests.
choices:
- disable
- enable
block-subscribe:
description:
- Enable/disable block SUBSCRIBE requests.
choices:
- disable
- enable
block-unknown:
description:
- Block unrecognized SIP requests (enabled by default).
choices:
- disable
- enable
block-update:
description:
- Enable/disable block UPDATE requests.
choices:
- disable
- enable
bye-rate:
description:
- BYE request rate limit (per second, per policy).
call-keepalive:
description:
- Continue tracking calls with no RTP for this many minutes.
cancel-rate:
description:
- CANCEL request rate limit (per second, per policy).
contact-fixup:
description:
- "Fixup contact anyway even if contact's IP:port doesn't match session's IP:port."
choices:
- disable
- enable
hnt-restrict-source-ip:
description:
- Enable/disable restrict RTP source IP to be the same as SIP source IP when HNT is enabled.
choices:
- disable
- enable
hosted-nat-traversal:
description:
- Hosted NAT Traversal (HNT).
choices:
- disable
- enable
info-rate:
description:
- INFO request rate limit (per second, per policy).
invite-rate:
description:
- INVITE request rate limit (per second, per policy).
ips-rtp:
description:
- Enable/disable allow IPS on RTP.
choices:
- disable
- enable
log-call-summary:
description:
- Enable/disable logging of SIP call summary.
choices:
- disable
- enable
log-violations:
description:
- Enable/disable logging of SIP violations.
choices:
- disable
- enable
malformed-header-allow:
description:
- Action for malformed Allow header.
choices:
- discard
- pass
- respond
malformed-header-call-id:
description:
- Action for malformed Call-ID header.
choices:
- discard
- pass
- respond
malformed-header-contact:
description:
- Action for malformed Contact header.
choices:
- discard
- pass
- respond
malformed-header-content-length:
description:
- Action for malformed Content-Length header.
choices:
- discard
- pass
- respond
malformed-header-content-type:
description:
- Action for malformed Content-Type header.
choices:
- discard
- pass
- respond
malformed-header-cseq:
description:
- Action for malformed CSeq header.
choices:
- discard
- pass
- respond
malformed-header-expires:
description:
- Action for malformed Expires header.
choices:
- discard
- pass
- respond
malformed-header-from:
description:
- Action for malformed From header.
choices:
- discard
- pass
- respond
malformed-header-max-forwards:
description:
- Action for malformed Max-Forwards header.
choices:
- discard
- pass
- respond
malformed-header-p-asserted-identity:
description:
- Action for malformed P-Asserted-Identity header.
choices:
- discard
- pass
- respond
malformed-header-rack:
description:
- Action for malformed RAck header.
choices:
- discard
- pass
- respond
malformed-header-record-route:
description:
- Action for malformed Record-Route header.
choices:
- discard
- pass
- respond
malformed-header-route:
description:
- Action for malformed Route header.
choices:
- discard
- pass
- respond
malformed-header-rseq:
description:
- Action for malformed RSeq header.
choices:
- discard
- pass
- respond
malformed-header-sdp-a:
description:
- Action for malformed SDP a line.
choices:
- discard
- pass
- respond
malformed-header-sdp-b:
description:
- Action for malformed SDP b line.
choices:
- discard
- pass
- respond
malformed-header-sdp-c:
description:
- Action for malformed SDP c line.
choices:
- discard
- pass
- respond
malformed-header-sdp-i:
description:
- Action for malformed SDP i line.
choices:
- discard
- pass
- respond
malformed-header-sdp-k:
description:
- Action for malformed SDP k line.
choices:
- discard
- pass
- respond
malformed-header-sdp-m:
description:
- Action for malformed SDP m line.
choices:
- discard
- pass
- respond
malformed-header-sdp-o:
description:
- Action for malformed SDP o line.
choices:
- discard
- pass
- respond
malformed-header-sdp-r:
description:
- Action for malformed SDP r line.
choices:
- discard
- pass
- respond
malformed-header-sdp-s:
description:
- Action for malformed SDP s line.
choices:
- discard
- pass
- respond
malformed-header-sdp-t:
description:
- Action for malformed SDP t line.
choices:
- discard
- pass
- respond
malformed-header-sdp-v:
description:
- Action for malformed SDP v line.
choices:
- discard
- pass
- respond
malformed-header-sdp-z:
description:
- Action for malformed SDP z line.
choices:
- discard
- pass
- respond
malformed-header-to:
description:
- Action for malformed To header.
choices:
- discard
- pass
- respond
malformed-header-via:
description:
- Action for malformed VIA header.
choices:
- discard
- pass
- respond
malformed-request-line:
description:
- Action for malformed request line.
choices:
- discard
- pass
- respond
max-body-length:
description:
- Maximum SIP message body length (0 meaning no limit).
max-dialogs:
description:
- Maximum number of concurrent calls/dialogs (per policy).
max-idle-dialogs:
description:
- Maximum number established but idle dialogs to retain (per policy).
max-line-length:
description:
- Maximum SIP header line length (78-4096).
message-rate:
description:
- MESSAGE request rate limit (per second, per policy).
nat-trace:
description:
- Enable/disable preservation of original IP in SDP i line.
choices:
- disable
- enable
no-sdp-fixup:
description:
- Enable/disable no SDP fix-up.
choices:
- disable
- enable
notify-rate:
description:
- NOTIFY request rate limit (per second, per policy).
open-contact-pinhole:
description:
- Enable/disable open pinhole for non-REGISTER Contact port.
choices:
- disable
- enable
open-record-route-pinhole:
description:
- Enable/disable open pinhole for Record-Route port.
choices:
- disable
- enable
open-register-pinhole:
description:
- Enable/disable open pinhole for REGISTER Contact port.
choices:
- disable
- enable
open-via-pinhole:
description:
- Enable/disable open pinhole for Via port.
choices:
- disable
- enable
options-rate:
description:
- OPTIONS request rate limit (per second, per policy).
prack-rate:
description:
- PRACK request rate limit (per second, per policy).
preserve-override:
description:
- "Override i line to preserve original IPS (default: append)."
choices:
- disable
- enable
provisional-invite-expiry-time:
description:
- Expiry time for provisional INVITE (10 - 3600 sec).
publish-rate:
description:
- PUBLISH request rate limit (per second, per policy).
refer-rate:
description:
- REFER request rate limit (per second, per policy).
register-contact-trace:
description:
- Enable/disable trace original IP/port within the contact header of REGISTER requests.
choices:
- disable
- enable
register-rate:
description:
- REGISTER request rate limit (per second, per policy).
rfc2543-branch:
description:
- Enable/disable support via branch compliant with RFC 2543.
choices:
- disable
- enable
rtp:
description:
- Enable/disable create pinholes for RTP traffic to traverse firewall.
choices:
- disable
- enable
ssl-algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
choices:
- high
- medium
- low
ssl-auth-client:
description:
- Require a client certificate and authenticate it with the peer/peergrp. Source user.peer.name user.peergrp.name.
ssl-auth-server:
description:
- Authenticate the server's certificate with the peer/peergrp. Source user.peer.name user.peergrp.name.
ssl-client-certificate:
description:
- Name of Certificate to offer to server if requested. Source vpn.certificate.local.name.
ssl-client-renegotiation:
description:
- Allow/block client renegotiation by server.
choices:
- allow
- deny
- secure
ssl-max-version:
description:
- Highest SSL/TLS version to negotiate.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-min-version:
description:
- Lowest SSL/TLS version to negotiate.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-mode:
description:
- SSL/TLS mode for encryption & decryption of traffic.
choices:
- off
- full
ssl-pfs:
description:
- SSL Perfect Forward Secrecy.
choices:
- require
- deny
- allow
ssl-send-empty-frags:
description:
- Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only).
choices:
- enable
- disable
ssl-server-certificate:
description:
- Name of Certificate return to the client in every SSL connection. Source vpn.certificate.local.name.
status:
description:
- Enable/disable SIP.
choices:
- disable
- enable
strict-register:
description:
- Enable/disable only allow the registrar to connect.
choices:
- disable
- enable
subscribe-rate:
description:
- SUBSCRIBE request rate limit (per second, per policy).
unknown-header:
description:
- Action for unknown SIP header.
choices:
- discard
- pass
- respond
update-rate:
description:
- UPDATE request rate limit (per second, per policy).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure VoIP profiles.
fortios_voip_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
voip_profile:
state: "present"
comment: "Comment."
name: "default_name_4"
sccp:
block-mcast: "disable"
log-call-summary: "disable"
log-violations: "disable"
max-calls: "9"
status: "disable"
verify-header: "disable"
sip:
ack-rate: "13"
block-ack: "disable"
block-bye: "disable"
block-cancel: "disable"
block-geo-red-options: "disable"
block-info: "disable"
block-invite: "disable"
block-long-lines: "disable"
block-message: "disable"
block-notify: "disable"
block-options: "disable"
block-prack: "disable"
block-publish: "disable"
block-refer: "disable"
block-register: "disable"
block-subscribe: "disable"
block-unknown: "disable"
block-update: "disable"
bye-rate: "31"
call-keepalive: "32"
cancel-rate: "33"
contact-fixup: "disable"
hnt-restrict-source-ip: "disable"
hosted-nat-traversal: "disable"
info-rate: "37"
invite-rate: "38"
ips-rtp: "disable"
log-call-summary: "disable"
log-violations: "disable"
malformed-header-allow: "discard"
malformed-header-call-id: "discard"
malformed-header-contact: "discard"
malformed-header-content-length: "discard"
malformed-header-content-type: "discard"
malformed-header-cseq: "discard"
malformed-header-expires: "discard"
malformed-header-from: "discard"
malformed-header-max-forwards: "discard"
malformed-header-p-asserted-identity: "discard"
malformed-header-rack: "discard"
malformed-header-record-route: "discard"
malformed-header-route: "discard"
malformed-header-rseq: "discard"
malformed-header-sdp-a: "discard"
malformed-header-sdp-b: "discard"
malformed-header-sdp-c: "discard"
malformed-header-sdp-i: "discard"
malformed-header-sdp-k: "discard"
malformed-header-sdp-m: "discard"
malformed-header-sdp-o: "discard"
malformed-header-sdp-r: "discard"
malformed-header-sdp-s: "discard"
malformed-header-sdp-t: "discard"
malformed-header-sdp-v: "discard"
malformed-header-sdp-z: "discard"
malformed-header-to: "discard"
malformed-header-via: "discard"
malformed-request-line: "discard"
max-body-length: "71"
max-dialogs: "72"
max-idle-dialogs: "73"
max-line-length: "74"
message-rate: "75"
nat-trace: "disable"
no-sdp-fixup: "disable"
notify-rate: "78"
open-contact-pinhole: "disable"
open-record-route-pinhole: "disable"
open-register-pinhole: "disable"
open-via-pinhole: "disable"
options-rate: "83"
prack-rate: "84"
preserve-override: "disable"
provisional-invite-expiry-time: "86"
publish-rate: "87"
refer-rate: "88"
register-contact-trace: "disable"
register-rate: "90"
rfc2543-branch: "disable"
rtp: "disable"
ssl-algorithm: "high"
ssl-auth-client: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl-auth-server: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl-client-certificate: "<your_own_value> (source vpn.certificate.local.name)"
ssl-client-renegotiation: "allow"
ssl-max-version: "ssl-3.0"
ssl-min-version: "ssl-3.0"
ssl-mode: "off"
ssl-pfs: "require"
ssl-send-empty-frags: "enable"
ssl-server-certificate: "<your_own_value> (source vpn.certificate.local.name)"
status: "disable"
strict-register: "disable"
subscribe-rate: "106"
unknown-header: "discard"
update-rate: "108"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_voip_profile_data(json):
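    # keep only the keys that map to attributes of the FortiOS 'voip profile'
    # table; control keys such as 'state' are dropped before the API call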
option_list = ['comment', 'name', 'sccp',
'sip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def voip_profile(data, fos):
vdom = data['vdom']
voip_profile_data = data['voip_profile']
filtered_data = filter_voip_profile_data(voip_profile_data)
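    # 'present' creates or updates the profile; 'absent' deletes it using its
    # name, which is the mkey for this FortiOS table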
if voip_profile_data['state'] == "present":
return fos.set('voip',
'profile',
data=filtered_data,
vdom=vdom)
elif voip_profile_data['state'] == "absent":
return fos.delete('voip',
'profile',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_voip(data, fos):
login(data, fos)
if data['voip_profile']:
resp = voip_profile(data, fos)
fos.logout()
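    # return (is_error, has_changed, result) for the AnsibleModule exit logic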
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"voip_profile": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"sccp": {"required": False, "type": "dict",
"options": {
"block-mcast": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log-call-summary": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log-violations": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"max-calls": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"verify-header": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}},
"sip": {"required": False, "type": "dict",
"options": {
"ack-rate": {"required": False, "type": "int"},
"block-ack": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-bye": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-cancel": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-geo-red-options": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-info": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-invite": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-long-lines": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-message": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-notify": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-options": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-prack": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-publish": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-refer": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-register": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-subscribe": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-unknown": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block-update": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"bye-rate": {"required": False, "type": "int"},
"call-keepalive": {"required": False, "type": "int"},
"cancel-rate": {"required": False, "type": "int"},
"contact-fixup": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"hnt-restrict-source-ip": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"hosted-nat-traversal": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"info-rate": {"required": False, "type": "int"},
"invite-rate": {"required": False, "type": "int"},
"ips-rtp": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log-call-summary": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log-violations": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"malformed-header-allow": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-call-id": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-contact": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-content-length": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-content-type": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-cseq": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-expires": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-from": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-max-forwards": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-p-asserted-identity": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-rack": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-record-route": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-route": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-rseq": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-a": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-b": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-c": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-i": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-k": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-m": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-o": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-r": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-s": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-t": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-v": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-sdp-z": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-to": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-header-via": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed-request-line": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"max-body-length": {"required": False, "type": "int"},
"max-dialogs": {"required": False, "type": "int"},
"max-idle-dialogs": {"required": False, "type": "int"},
"max-line-length": {"required": False, "type": "int"},
"message-rate": {"required": False, "type": "int"},
"nat-trace": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"no-sdp-fixup": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"notify-rate": {"required": False, "type": "int"},
"open-contact-pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open-record-route-pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open-register-pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open-via-pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"options-rate": {"required": False, "type": "int"},
"prack-rate": {"required": False, "type": "int"},
"preserve-override": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"provisional-invite-expiry-time": {"required": False, "type": "int"},
"publish-rate": {"required": False, "type": "int"},
"refer-rate": {"required": False, "type": "int"},
"register-contact-trace": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"register-rate": {"required": False, "type": "int"},
"rfc2543-branch": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"rtp": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl-algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low"]},
"ssl-auth-client": {"required": False, "type": "str"},
"ssl-auth-server": {"required": False, "type": "str"},
"ssl-client-certificate": {"required": False, "type": "str"},
"ssl-client-renegotiation": {"required": False, "type": "str",
"choices": ["allow", "deny", "secure"]},
"ssl-max-version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl-min-version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl-mode": {"required": False, "type": "str",
"choices": ["off", "full"]},
"ssl-pfs": {"required": False, "type": "str",
"choices": ["require", "deny", "allow"]},
"ssl-send-empty-frags": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl-server-certificate": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"strict-register": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"subscribe-rate": {"required": False, "type": "int"},
"unknown-header": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"update-rate": {"required": False, "type": "int"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
is_error, has_changed, result = fortios_voip(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
sudosurootdev/external_chromium_org
|
third_party/closure_linter/closure_linter/tokenutil_test.py
|
109
|
7678
|
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
class FakeToken(object):
pass
class TokenUtilTest(googletest.TestCase):
def testGetTokenRange(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.next = b
b.next = c
c.next = d
self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
# This is an error as e does not come after a in the token chain.
self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
def testTokensToString(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.string = 'aaa'
b.string = 'bbb'
c.string = 'ccc'
d.string = 'ddd'
e.string = 'eee'
a.line_number = 5
b.line_number = 6
c.line_number = 6
d.line_number = 10
e.line_number = 11
self.assertEquals(
'aaa\nbbbccc\n\n\n\nddd\neee',
tokenutil.TokensToString([a, b, c, d, e]))
self.assertEquals(
'ddd\neee\naaa\nbbbccc',
tokenutil.TokensToString([d, e, a, b, c]),
'Neighboring tokens not in line_number order should have a newline '
'between them.')
def testGetPreviousCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
None,
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start1.',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
def testGetNextCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'end1',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
def testGetIdentifierStart(self):
tokens = testutil.TokenizeSource("""
start1 . // comment
prototype. /* another comment */
end1
['edge'][case].prototype.
end2 = function() {}
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
self.AssertInsertTokenAfterBefore(True)
def AssertInsertTokenAfterBefore(self, after):
new_token = javascripttokens.JavaScriptToken(
'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
existing_token1 = javascripttokens.JavaScriptToken(
'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
existing_token1.start_index = 0
existing_token1.metadata = ecmametadatapass.EcmaMetaData()
existing_token2 = javascripttokens.JavaScriptToken(
' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
existing_token2.start_index = 3
existing_token2.metadata = ecmametadatapass.EcmaMetaData()
existing_token2.metadata.last_code = existing_token1
existing_token1.next = existing_token2
existing_token2.previous = existing_token1
if after:
tokenutil.InsertTokenAfter(new_token, existing_token1)
else:
tokenutil.InsertTokenBefore(new_token, existing_token2)
self.assertEquals(existing_token1, new_token.previous)
self.assertEquals(existing_token2, new_token.next)
self.assertEquals(new_token, existing_token1.next)
self.assertEquals(new_token, existing_token2.previous)
self.assertEquals(existing_token1, new_token.metadata.last_code)
self.assertEquals(new_token, existing_token2.metadata.last_code)
self.assertEquals(0, existing_token1.start_index)
self.assertEquals(3, new_token.start_index)
self.assertEquals(4, existing_token2.start_index)
def testGetIdentifierForToken(self):
tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
onContinuedLine
(start2.abc.def
.hij.klm
.nop)
start3.abc.def
.hij = function() {};
// An absurd multi-liner.
start4.abc.def.
hij.
klm = function() {};
start5 . aaa . bbb . ccc
shouldntBePartOfThePreviousSymbol
start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
var start7 = 42;
function start8() {
}
start9.abc. // why is there a comment here?
def /* another comment */
shouldntBePart
start10.abc // why is there a comment here?
.def /* another comment */
shouldntBePart
start11.abc. middle1.shouldNotBeIdentifier
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1.abc.def.prototype.onContinuedLine',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start2.abc.def.hij.klm.nop',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
self.assertEquals(
'start3.abc.def.hij',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
self.assertEquals(
'start4.abc.def.hij.klm',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
self.assertEquals(
'start5.aaa.bbb.ccc',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
self.assertEquals(
'start6.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
self.assertEquals(
'start7',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
self.assertEquals(
'start8',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
self.assertEquals(
'start9.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
self.assertEquals(
'start10.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
self.assertIsNone(
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
if __name__ == '__main__':
googletest.main()
|
bsd-3-clause
|
Southpaw-TACTIC/Team
|
src/python/Lib/hotshot/log.py
|
20
|
6433
|
import _hotshot
import os.path
import parser
import symbol
from _hotshot import \
WHAT_ENTER, \
WHAT_EXIT, \
WHAT_LINENO, \
WHAT_DEFINE_FILE, \
WHAT_DEFINE_FUNC, \
WHAT_ADD_INFO
__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
def __init__(self, logfn):
# fileno -> filename
self._filemap = {}
# (fileno, lineno) -> filename, funcname
self._funcmap = {}
self._reader = _hotshot.logreader(logfn)
self._nextitem = self._reader.next
self._info = self._reader.info
if 'current-directory' in self._info:
self.cwd = self._info['current-directory']
else:
self.cwd = None
# This mirrors the call stack of the profiled code as the log
# is read back in. It contains tuples of the form:
#
# (file name, line number of function def, function name)
#
self._stack = []
self._append = self._stack.append
self._pop = self._stack.pop
def close(self):
self._reader.close()
def fileno(self):
"""Return the file descriptor of the log reader's log file."""
return self._reader.fileno()
def addinfo(self, key, value):
"""This method is called for each additional ADD_INFO record.
This can be overridden by applications that want to receive
these events. The default implementation does not need to be
called by alternate implementations.
The initial set of ADD_INFO records do not pass through this
mechanism; this is only needed to receive notification when
new values are added. Subclasses can inspect self._info after
calling LogReader.__init__().
"""
pass
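    # A minimal sketch (hypothetical, not part of hotshot) of how a subclass
    # could pick up new ADD_INFO values as they are read:
    #
    #   class VerboseLogReader(LogReader):
    #       def addinfo(self, key, value):
    #           print "ADD_INFO:", key, value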
def get_filename(self, fileno):
try:
return self._filemap[fileno]
except KeyError:
raise ValueError, "unknown fileno"
def get_filenames(self):
return self._filemap.values()
def get_fileno(self, filename):
filename = os.path.normcase(os.path.normpath(filename))
for fileno, name in self._filemap.items():
if name == filename:
return fileno
raise ValueError, "unknown filename"
def get_funcname(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
raise ValueError, "unknown function location"
# Iteration support:
# This adds an optional (& ignored) parameter to next() so that the
# same bound method can be used as the __getitem__() method -- this
# avoids using an additional method call which kills the performance.
def next(self, index=0):
while 1:
# This call may raise StopIteration:
what, tdelta, fileno, lineno = self._nextitem()
# handle the most common cases first
if what == WHAT_ENTER:
filename, funcname = self._decode_location(fileno, lineno)
t = (filename, lineno, funcname)
self._append(t)
return what, t, tdelta
if what == WHAT_EXIT:
try:
return what, self._pop(), tdelta
except IndexError:
raise StopIteration
if what == WHAT_LINENO:
filename, firstlineno, funcname = self._stack[-1]
return what, (filename, lineno, funcname), tdelta
if what == WHAT_DEFINE_FILE:
filename = os.path.normcase(os.path.normpath(tdelta))
self._filemap[fileno] = filename
elif what == WHAT_DEFINE_FUNC:
filename = self._filemap[fileno]
self._funcmap[(fileno, lineno)] = (filename, tdelta)
elif what == WHAT_ADD_INFO:
# value already loaded into self.info; call the
# overridable addinfo() handler so higher-level code
# can pick up the new value
if tdelta == 'current-directory':
self.cwd = lineno
self.addinfo(tdelta, lineno)
else:
raise ValueError, "unknown event type"
def __iter__(self):
return self
#
# helpers
#
def _decode_location(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
#
# This should only be needed when the log file does not
# contain all the DEFINE_FUNC records needed to allow the
# function name to be retrieved from the log file.
#
if self._loadfile(fileno):
filename = funcname = None
try:
filename, funcname = self._funcmap[(fileno, lineno)]
except KeyError:
filename = self._filemap.get(fileno)
funcname = None
self._funcmap[(fileno, lineno)] = (filename, funcname)
return filename, funcname
def _loadfile(self, fileno):
try:
filename = self._filemap[fileno]
except KeyError:
print "Could not identify fileId", fileno
return 1
if filename is None:
return 1
absname = os.path.normcase(os.path.join(self.cwd, filename))
try:
fp = open(absname)
except IOError:
return
st = parser.suite(fp.read())
fp.close()
# Scan the tree looking for def and lambda nodes, filling in
# self._funcmap with all the available information.
funcdef = symbol.funcdef
lambdef = symbol.lambdef
stack = [st.totuple(1)]
while stack:
tree = stack.pop()
try:
sym = tree[0]
except (IndexError, TypeError):
continue
if sym == funcdef:
self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
elif sym == lambdef:
self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
stack.extend(list(tree[1:]))
|
epl-1.0
|
dednal/chromium.src
|
tools/telemetry/telemetry/core/backends/chrome/oobe.py
|
15
|
2036
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core import web_contents
class Oobe(web_contents.WebContents):
def __init__(self, inspector_backend):
super(Oobe, self).__init__(inspector_backend)
def _GaiaLoginContext(self):
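    # Probe each JavaScript context on the page until one contains the Gaia
    # sign-in form (identified by the presence of the 'Email' element).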
max_context_id = self.EnableAllContexts()
logging.debug('%d contexts in Gaia page' % max_context_id)
for gaia_context in range(max_context_id + 1):
try:
if self.EvaluateJavaScriptInContext(
"document.readyState == 'complete' && "
"document.getElementById('Email') != null",
gaia_context):
return gaia_context
except exceptions.EvaluateException:
pass
return None
def _ExecuteOobeApi(self, api, *args):
logging.info('Invoking %s' % api)
self.WaitForJavaScriptExpression("typeof Oobe == 'function'", 20)
if self.EvaluateJavaScript("typeof %s == 'undefined'" % api):
raise exceptions.LoginException('%s js api missing' % api)
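    # Build the JS call string, e.g. "Oobe.loginForTesting('user','pass');"
    # for a (hypothetical) two-argument call.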
js = api + '(' + ("'%s'," * len(args)).rstrip(',') + ');'
self.ExecuteJavaScript(js % args)
def NavigateGuestLogin(self):
"""Logs in as guest."""
self._ExecuteOobeApi('Oobe.guestLoginForTesting')
def NavigateFakeLogin(self, username, password):
"""Fake user login."""
self._ExecuteOobeApi('Oobe.loginForTesting', username, password)
def NavigateGaiaLogin(self, username, password):
"""Logs in to GAIA with provided credentials."""
self._ExecuteOobeApi('Oobe.addUserForTesting')
gaia_context = util.WaitFor(self._GaiaLoginContext, timeout=30)
self.ExecuteJavaScriptInContext("""
document.getElementById('Email').value='%s';
document.getElementById('Passwd').value='%s';
document.getElementById('signIn').click();"""
% (username, password),
gaia_context)
|
bsd-3-clause
|
benjaminrigaud/django
|
tests/migrations/test_autodetector.py
|
1
|
81958
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase, override_settings
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ProjectState, ModelState
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db import models, connection
from django.contrib.auth.models import AbstractBaseUser
class DeconstructableObject(object):
"""
A custom deconstructable object.
"""
def deconstruct(self):
return self.__module__ + '.' + self.__class__.__name__, [], {}
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))])
author_name_null = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, null=True))])
author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))])
author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))])
author_name_default = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default='Ada Lovelace'))])
author_name_deconstructable_1 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))])
author_name_deconstructable_2 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))])
author_name_deconstructable_3 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))])
author_name_deconstructable_4 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_with_book_order_wrt = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_with_publisher_string = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher_name", models.CharField(max_length=200))])
author_with_publisher = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher", models.ForeignKey("testapp.Publisher"))])
author_with_custom_user = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("user", models.ForeignKey("thirdapp.CustomUser"))])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {"proxy": True, "verbose_name": "Super Author"}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
("pk_field", models.IntegerField(primary_key=True)),
])
author_with_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_through = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract"))])
author_with_options = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))], {"verbose_name": "Authi", "permissions": [('can_hire', 'Can hire')]})
author_with_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_one"})
contract = ModelState("testapp", "Contract", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("publisher", models.ForeignKey("testapp.Publisher"))])
publisher = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100))])
publisher_with_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("name", models.CharField(max_length=100))])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Aardvark")), ("name", models.CharField(max_length=100))])
publisher_with_book = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Book")), ("name", models.CharField(max_length=100))])
other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))])
book_proxy_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("thirdapp.AuthorProxy")), ("title", models.CharField(max_length=200))])
book_migrations_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("migrations.UnmigratedModel")), ("title", models.CharField(max_length=200))])
book_with_no_author = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=200))])
book_with_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("writer", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))])
book_with_multiple_authors = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author")), ("title", models.CharField(max_length=200))])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")), ("title", models.CharField(max_length=200))])
book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("author", "title")}})
book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("title", "author")}})
book_unique_3 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("newfield", models.IntegerField()), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("title", "newfield")}})
book_unique_4 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author")),
("title", models.CharField(max_length=200)),
], {"unique_together": {("title", "newfield2")}})
attribution = ModelState("otherapp", "Attribution", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("book", models.ForeignKey("otherapp.Book"))])
edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))])
custom_user = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [("id", models.OneToOneField("testapp.Author", primary_key=True))])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [("id", models.AutoField(primary_key=True)), ("knight", models.ForeignKey("eggs.Knight")), ("parent", models.ForeignKey("eggs.Rabbit"))], {"unique_together": {("parent", "knight")}})
def repr_changes(self, changes):
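        """Renders the detected changes as indented text for use in failure messages."""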
output = ""
for app_label, migrations in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
return output
def assertNumberMigrations(self, changes, app_label, number):
if len(changes.get(app_label, [])) != number:
self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
))
def assertOperationTypes(self, changes, app_label, index, types):
if not changes.get(app_label, None):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
real_types = [operation.__class__.__name__ for operation in migration.operations]
if types != real_types:
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
types,
self.repr_changes(changes),
))
def assertOperationAttributes(self, changes, app_label, index, operation_index, **attrs):
if not changes.get(app_label, None):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
        if len(migration.operations) < operation_index + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_index,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_index]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_index,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
))
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model_state(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"Tests auto-naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"Tests that trim does not remove dependencies but does remove unwanted apps"
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner(defaults={"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"Tests custom naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = 'custom_name'
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"Tests autodetection of new models"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "Author")
def test_old_model(self):
"Tests deletion of old models"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Author")
def test_add_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_remove_field(self):
"Tests autodetection of removed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "name")
def test_alter_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
def test_alter_field_to_not_null_with_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
raise Exception("Should not have prompted for not null addition")
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
self.assertEqual(action.field.default, 'Ada Lovelace')
def test_alter_field_to_not_null_without_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Ignore for now, and let me handle existing rows with NULL
# myself (e.g. adding a RunPython or RunSQL operation in the new
# migration file before the AlterField operation)
return models.NOT_PROVIDED
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
self.assertIs(action.field.default, models.NOT_PROVIDED)
def test_alter_field_to_not_null_oneoff_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Provide a one-off default now (will be set on all existing rows)
return 'Some Name'
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertFalse(action.preserve_default)
self.assertEqual(action.field.default, "Some Name")
def test_rename_field(self):
"Tests autodetection of renamed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Check
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"Tests autodetection of renamed models"
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number of migrations for model rename?
self.assertNumberMigrations(changes, 'testapp', 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameModel")
self.assertEqual(action.old_name, "Author")
self.assertEqual(action.new_name, "Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_field_and_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}))
changes = autodetector._detect_changes()
# Right number of migrations for model rename?
self.assertNumberMigrations(changes, 'testapp', 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right actions?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameModel")
self.assertEqual(action.old_name, "Author")
self.assertEqual(action.new_name, "Writer")
# Right number of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right actions?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameField")
self.assertEqual(action.old_name, "author")
self.assertEqual(action.new_name, "writer")
def test_fk_dependency(self):
"Tests that having a ForeignKey automatically adds a dependency"
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 1)
self.assertEqual(len(changes['thirdapp']), 1)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['thirdapp'][0]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"Tests that FK dependencies still work on proxy models"
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertNumberMigrations(changes, 'thirdapp', 1)
# Right number of actions?
# Right actions?
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
self.assertEqual(changes['otherapp'][0].dependencies, [("thirdapp", "auto_1")])
self.assertEqual(changes['thirdapp'][0].dependencies, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
        Tests that a migration with a FK between two models of the same app
        does not have a dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book, self.publisher_with_book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertNumberMigrations(changes, 'otherapp', 2)
# Right types?
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "auto_1")])
self.assertEqual(changes['otherapp'][0].dependencies, [])
self.assertEqual(set(changes['otherapp'][1].dependencies), {("otherapp", "auto_1"), ("testapp", "auto_1")})
def test_same_app_circular_fk_dependency(self):
"""
        Tests that a migration with a circular FK between two models of the
        same app does not have a dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
def test_same_app_circular_fk_dependency_and_unique_together(self):
"""
        Tests that a migration with a circular FK dependency does not try to
        create the unique_together constraint before all required fields exist.
See ticket #22275.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.knight, self.rabbit])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(changes, 'eggs', 0, ["CreateModel", "CreateModel", "AlterUniqueTogether"])
self.assertFalse("unique_together" in changes['eggs'][0].operations[0].options)
self.assertFalse("unique_together" in changes['eggs'][0].operations[1].options)
# Right dependencies?
self.assertEqual(changes['eggs'][0].dependencies, [])
def test_unique_together(self):
"Tests unique_together detection"
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, {("author", "title")})
def test_unique_together_no_changes(self):
"Tests that unique_togther doesn't generate a migration if no changes have been made"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options"""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, "author_one")
def test_alter_db_table_change(self):
"Tests detection for changing db_table in model's options'"
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_new_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, "author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, None)
def test_alter_db_table_no_changes(self):
"""
Tests that alter_db_table doesn't generate a migration if no changes
have been made.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_alter_db_table_with_model_change(self):
"""
        Tests that when a model is renamed but keeps the same db_table, the
        autodetector creates only one operation.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_db_table_options])
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
def test_empty_foo_together(self):
"#23452 - Empty unique/index_togther shouldn't generate a migration."
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
        model_state_not_specified = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))]
)
# Explicitly testing for None, since this was the issue in #23452 after
        # an AlterFooTogether operation with e.g. () as value
model_state_none = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))],
{"unique_together": None, "index_together": None}
)
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))],
{"unique_together": set(), "index_together": set()}
)
def test(from_state, to_state, msg):
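            # Helper: no operations at all should be detected between the two states.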
before = self.make_project_state([from_state])
after = self.make_project_state([to_state])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
            (model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
            (model_state_not_specified, model_state_none, '"not specified" to "None"'),
            (model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
            (model_state_none, model_state_not_specified, '"None" to "not specified"'),
            (model_state_none, model_state_none, '"None" to "None"'),
            (model_state_none, model_state_empty, '"None" to "empty"'),
            (model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_unique_together_ordering(self):
"Tests that unique_together also triggers on ordering changes"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
def test_add_field_and_unique_together(self):
"Tests that added fields will be created before using them in unique together"
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
def test_remove_field_and_unique_together(self):
"Tests that removed fields will be removed after updating unique_together"
before = self.make_project_state([self.author_empty, self.book_unique_3])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
def test_rename_field_and_unique_together(self):
"Tests that removed fields will be removed after updating unique together"
before = self.make_project_state([self.author_empty, self.book_unique_3])
after = self.make_project_state([self.author_empty, self.book_unique_4])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield2")})
def test_remove_index_together(self):
author_index_together = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))
], {"index_together": {("id", "name")}})
before = self.make_project_state([author_index_together])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterIndexTogether"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", index_together=set())
def test_remove_unique_together(self):
author_unique_together = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))
], {"unique_together": {("id", "name")}})
before = self.make_project_state([author_unique_together])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", unique_together=set())
def test_proxy(self):
"Tests that the autodetector correctly deals with proxy models"
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True})
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={})
def test_proxy_custom_pk(self):
"#23415 - The autodetector must correctly deal with custom FK on proxy models."
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
def test_unmanaged(self):
"Tests that the autodetector correctly deals with managed models"
# First, we test adding an unmanaged model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged")
self.assertEqual(changes['testapp'][0].operations[0].options['managed'], False)
# Now, we test turning an unmanaged model into a managed model
before = self.make_project_state([self.author_empty, self.author_unmanaged])
after = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorUnmanaged")
def test_unmanaged_custom_pk(self):
"#23415 - The autodetector must correctly deal with custom FK on unmanaged models."
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_default_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_custom_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
before = self.make_project_state([self.custom_user])
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Check the dependency is correct
migration = changes['testapp'][0]
self.assertEqual(migration.dependencies, [("__setting__", "AUTH_USER_MODEL")])
def test_add_field_with_default(self):
"""
Adding a field with a default should work (#22030).
"""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_custom_deconstructable(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
before = self.make_project_state([self.author_name_deconstructable_1])
after = self.make_project_state([self.author_name_deconstructable_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
def test_deconstruct_field_kwarg(self):
"""
Field instances are handled correctly by nested deconstruction.
"""
before = self.make_project_state([self.author_name_deconstructable_3])
after = self.make_project_state([self.author_name_deconstructable_4])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
def test_deconstruct_type(self):
"""
        #22951 -- Uninstantiated classes with deconstruct() are correctly returned
        by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
))
],
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
Adding an FK in the same "spot" as a deleted CharField should work. (#22300).
"""
# Make state
before = self.make_project_state([self.author_with_publisher_string])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right result?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher])
after = self.make_project_state([self.author_name]) # removes both the model and FK
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 2)
# Right actions in right order?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "publisher")
action = migration.operations[1]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Publisher")
def test_add_many_to_many(self):
"""
Adding a ManyToManyField should not prompt for a default (#22435).
"""
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_addition(self, field_name, model_name):
raise Exception("Should not have prompted for not null addition")
before = self.make_project_state([self.author_empty, self.publisher])
# Add ManyToManyField to author model
after = self.make_project_state([self.author_with_m2m, self.publisher])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
migration = changes['testapp'][0]
# Right actions in right order?
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "publishers")
def test_create_with_through_model(self):
"""
Adding a m2m with a through model and the models that use it should
be ordered correctly.
"""
before = self.make_project_state([])
after = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"])
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change must remove
the field before the model to maintain consistency.
"""
before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution])
after = self.make_project_state([self.book_with_no_author, self.author_name]) # removes both the through model and ManyToMany
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 4)
# Right actions in right order?
# The first two are because we can't optimise RemoveField
# into DeleteModel reliably.
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "author")
action = migration.operations[1]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "book")
action = migration.operations[2]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "authors")
action = migration.operations[3]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Attribution")
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the
"through" model in the same change must remove
the field before the model to maintain consistency.
"""
before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution])
after = self.make_project_state([self.author_name]) # removes both the through model and ManyToMany
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
# Right number of actions?
self.assertOperationTypes(changes, 'otherapp', 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
def test_m2m_w_through_multistep_remove(self):
"""
A model with a m2m field that specifies a "through" model cannot be removed in the same
migration as that through model as the schema will pass through an inconsistent state.
The autodetector should produce two migrations to avoid this issue.
"""
before = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
after = self.make_project_state([self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "RemoveField", "DeleteModel"])
# Actions touching the right stuff?
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="publisher")
self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
def test_non_circular_foreignkey_dependency_removal(self):
"""
If two models with a ForeignKey from one to the other are removed at the same time,
the autodetector should remove them in the correct order.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
def test_alter_model_options(self):
"""
Changing a model's options should make a change
"""
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
# Changing them back to empty should also make a change
before = self.make_project_state([self.author_with_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
def test_alter_model_options_proxy(self):
"""
Changing a proxy model's options should also make a change
"""
before = self.make_project_state([self.author_proxy, self.author_empty])
after = self.make_project_state([self.author_proxy_options, self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
def test_set_alter_order_with_respect_to(self):
"Tests that setting order_with_respect_to adds a field"
# Make state
before = self.make_project_state([self.book, self.author_with_book])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
def test_add_alter_order_with_respect_to(self):
"""
Tests that setting order_with_respect_to when adding the FK too
does things in the right order.
"""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
def test_remove_alter_order_with_respect_to(self):
"""
Tests that removing order_with_respect_to when removing the FK too
does things in the right order.
"""
# Make state
before = self.make_project_state([self.book, self.author_with_book_order_wrt])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
def test_add_model_order_with_respect_to(self):
"""
Tests that setting order_with_respect_to when adding the whole model
does things in the right order.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
# Make sure the _order field is not in the CreateModel fields
self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
def test_swappable_first_inheritance(self):
"""
Tests that swappable models get their CreateModel first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""
Tests that swappable models get their CreateModel first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user_no_inherit, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""
Tests that bases of other models come first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_based_on_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_proxy_bases_first(self):
"""
Tests that bases of proxies come first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy, self.author_proxy_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
def test_pk_fk_included(self):
"""
Tests that a relation used as the primary key is kept as part of CreateModel.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_pk_fk_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
Tests that a dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
# Right dependencies?
self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "__first__")])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
Tests that a dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
# Right dependencies?
self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "0002_second")])
def test_alter_fk_before_model_deletion(self):
"""
Tests that ForeignKeys are altered _before_ the model they used to
refer to are deleted.
"""
# Make state
before = self.make_project_state([self.author_name, self.publisher_with_author])
after = self.make_project_state([self.aardvark_testapp, self.publisher_with_aardvark_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
Tests that ForeignKeys correctly depend on other apps' models (#23100)
"""
# Make state
before = self.make_project_state([self.author_name, self.book])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "__first__")])
def test_circular_dependency_mixed_addcreate(self):
"""
Tests that the dependency resolver knows to put all CreateModel
before AddField and not become unsolvable (#23315)
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry")),
])
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
])
apackage = ModelState("b", "APackage", [
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person")),
])
country = ModelState("b", "DeliveryCountry", [
("id", models.AutoField(primary_key=True)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, person, apackage, country])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models (#23322)
"""
tenant = ModelState("a", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address"))],
bases=(AbstractBaseUser, )
)
address = ModelState("b", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
self.assertEqual(set(changes['a'][1].dependencies), {('a', 'auto_1'), ('b', 'auto_1')})
self.assertEqual(changes['b'][0].dependencies, [('__setting__', 'AUTH_USER_MODEL')])
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model (#23322)
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
])
tenant = ModelState("b", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address"))],
bases=(AbstractBaseUser, )
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
self.assertEqual(set(changes['a'][1].dependencies), {('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')})
self.assertEqual(changes['b'][0].dependencies, [('a', 'auto_1')])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models (#23322)
"""
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, related_name='children'))
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([person])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
|
bsd-3-clause
|
lgiommi/root
|
interpreter/llvm/src/utils/lit/lit/ShUtil.py
|
83
|
12179
|
from __future__ import absolute_import
import itertools
import lit.util
from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
# If the parse string isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
elif c == "'":
self.eat()
str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
else:
str += self.eat()
return str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
lit.util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError("Fast path failure: %r != %r" % (
res, reference))
if self.pos != end:
raise ValueError("Fast path failure: %r != %r" % (
self.pos, end))
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
if self.maybe_eat('>'):
return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
for item in self.tokens:
return item
return None
def look(self):
token = self.lex()
if token is not None:
self.tokens = itertools.chain([token], self.tokens)
return token
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError("empty command!")
if isinstance(tok, tuple):
raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
while 1:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, str):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError(
"missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
###
import unittest
class TestShLexer(unittest.TestCase):
def lex(self, str, *args, **kwargs):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
self.assertEqual(self.lex('a|b>c&d<e;f'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
('<',), 'e', (';',), 'f'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
['a2', ('>',), 'c'])
self.assertEqual(self.lex('a 2>c'),
['a', ('>',2), 'c'])
def test_quoting(self):
self.assertEqual(self.lex(""" 'a' """),
['a'])
self.assertEqual(self.lex(""" "hello\\"world" """),
['hello"world'])
self.assertEqual(self.lex(""" "hello\\'world" """),
["hello\\'world"])
self.assertEqual(self.lex(""" "hello\\\\world" """),
["hello\\world"])
self.assertEqual(self.lex(""" he"llo wo"rld """),
["hello world"])
self.assertEqual(self.lex(""" a\\ b a\\\\b """),
["a b", "a\\b"])
self.assertEqual(self.lex(""" "" "" """),
["", ""])
self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
['a\\', 'b'])
class TestShParse(unittest.TestCase):
def parse(self, str):
return ShParser(str).parse()
def test_basic(self):
self.assertEqual(self.parse('echo hello'),
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
self.assertEqual(self.parse("""echo -DFOO='a'"""),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
self.assertEqual(self.parse('echo -DFOO="a"'),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
Pipeline([Command(['echo', 'hello'],
[((('>'),), 'c')])], False))
self.assertEqual(self.parse('echo hello > c >> d'),
Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
(('>>',), 'd')])], False))
self.assertEqual(self.parse('a 2>&1'),
Pipeline([Command(['a'], [(('>&',2), '1')])], False))
def test_pipeline(self):
self.assertEqual(self.parse('a | b'),
Pipeline([Command(['a'], []),
Command(['b'], [])],
False))
self.assertEqual(self.parse('a | b | c'),
Pipeline([Command(['a'], []),
Command(['b'], []),
Command(['c'], [])],
False))
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a & b'),
Seq(Pipeline([Command(['a'], [])], False),
'&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b'),
Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a || b'),
Seq(Pipeline([Command(['a'], [])], False),
'||',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b || c'),
Seq(Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)),
'||',
Pipeline([Command(['c'], [])], False)))
self.assertEqual(self.parse('a; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
|
bworrell/mixbox
|
mixbox/parser.py
|
1
|
6500
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from abc import ABCMeta, abstractmethod
from distutils.version import StrictVersion
from .exceptions import ignored
from .xml import get_etree_root, get_schemaloc_pairs
class UnknownVersionError(Exception):
"""A parsed document contains no version information."""
pass
class UnsupportedVersionError(Exception):
"""A parsed document is a version unsupported by the parser."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedVersionError, self).__init__(message)
self.expected = expected
self.found = found
class UnsupportedRootElementError(Exception):
"""A parsed document contains an unsupported root element."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedRootElementError, self).__init__(message)
self.expected = expected
self.found = found
class EntityParser(object):
__metaclass__ = ABCMeta
@abstractmethod
def supported_tags(self):
"""Return an iterable of supported document root tags (strings)."""
@abstractmethod
def get_version(self, root):
"""Return as a string the schema version used by the document root."""
@abstractmethod
def supported_versions(self, tag):
"""Return all the supported versions for a given tag."""
@abstractmethod
def get_entity_class(self, tag):
"""Return the class to be returned as the result of parsing."""
def _get_version(self, root):
"""Return the version of the root element passed in.
Args:
root (etree.Element)
Returns:
            distutils.version.StrictVersion
Raises:
UnknownVersionError
"""
# Note: STIX and MAEC use a "version" attribute. To support CybOX, a
# subclass will need to combine "cybox_major_version",
# "cybox_minor_version", and "cybox_update_version".
version = self.get_version(root)
if version:
return StrictVersion(version)
raise UnknownVersionError(
"Unable to determine the version of the input document. No "
"version information found on the root element."
)
def _check_version(self, root):
"""Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError
"""
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
raise UnsupportedVersionError(
message=error.format(version, supported),
expected=supported,
found=version
)
def _check_root_tag(self, root):
"""Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError
"""
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=supported,
found=root.tag,
)
def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a STIX binding object from the supplied xml file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
            .UnsupportedVersionError: If `check_version` is ``True`` and
`xml_file` contains an unsupported STIX version.
            .UnsupportedRootElementError: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
root = get_etree_root(xml_file, encoding=encoding)
if check_root:
self._check_root_tag(root)
if check_version:
self._check_version(root)
entity_class = self.get_entity_class(root.tag)
entity_obj = entity_class._binding_class.factory()
entity_obj.build(root)
return entity_obj
def parse_xml(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a python-stix STIXPackage object from the supplied xml_file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`. If
``None``, an attempt will be made to determine the input
character encoding.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
            .UnsupportedVersionError: If `check_version` is ``True`` and
`xml_file` contains an unsupported STIX version.
            .UnsupportedRootElementError: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
entity_obj = self.parse_xml_to_obj(
xml_file=xml_file,
check_version=check_version,
check_root=check_root,
encoding=encoding
)
root = get_etree_root(xml_file, encoding=encoding)
entity = self.get_entity_class(root.tag).from_obj(entity_obj)
# Save the parsed nsmap and schemalocations onto the parsed Entity
entity.__input_namespaces__ = dict(root.nsmap.iteritems())
with ignored(KeyError):
pairs = get_schemaloc_pairs(root)
entity.__input_schemalocations__ = dict(pairs)
return entity
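# --- Illustrative sketch, not part of the original module ---
# The note in _get_version() above says STIX/MAEC documents carry the schema
# version in a "version" attribute on the root element. A minimal concrete
# parser might look like the hypothetical subclass below; "ExamplePackage",
# the namespace/tag and the version list are placeholders invented for
# illustration, not real mixbox or STIX names.
class ExamplePackage(object):
    """Hypothetical Entity stand-in; a real Entity supplies _binding_class and from_obj()."""
class ExampleEntityParser(EntityParser):
    _ROOT_TAG = "{http://example.com/ns}Package"   # hypothetical root tag
    def supported_tags(self):
        return [self._ROOT_TAG]
    def get_version(self, root):
        # STIX/MAEC style: the schema version lives in a "version" attribute.
        return root.attrib.get("version")
    def supported_versions(self, tag):
        return ["1.0", "1.1"]                      # hypothetical versions
    def get_entity_class(self, tag):
        return ExamplePackage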
|
bsd-3-clause
|
mancoast/CPythonPyc_test
|
fail/342_test_range.py
|
67
|
23223
|
# Python test set -- built-in functions
import test.support, unittest
import sys
import pickle
import itertools
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
if (start - stop) // step < 0:
# replace stop with next element in the sequence of integers
# that are congruent to start modulo step.
stop += (start - stop) % step
while start != stop:
yield start
start += step
def pyrange_reversed(start, stop, step):
stop += (start - stop) % step
return pyrange(stop - step, start - step, -step)
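# Quick illustration (not part of the original test file): the stop adjustment
# above makes the pure-Python generators agree with the builtin, e.g.
#   list(pyrange(0, 10, 3))          == list(range(0, 10, 3))           == [0, 3, 6, 9]
#   list(pyrange_reversed(0, 10, 3)) == list(reversed(range(0, 10, 3))) == [9, 6, 3, 0]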
class RangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
                self.fail('{}: wrong element at position {}; '
                          'expected {}, got {}'.format(test_id, i, y, x))
def test_range(self):
self.assertEqual(list(range(3)), [0, 1, 2])
self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(range(0)), [])
self.assertEqual(list(range(-3)), [])
self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
self.assertRaises(TypeError, range, 0.0, 2, 1)
self.assertRaises(TypeError, range, 1, 2.0, 1)
self.assertRaises(TypeError, range, 1, 2, 1.0)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2)
r = range(-sys.maxsize, sys.maxsize, 2)
self.assertEqual(len(r), sys.maxsize)
def test_large_operands(self):
x = range(10**20, 10**20+10, 3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
x = range(10**20+10, 10**20, 3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20, 10**20+10, -3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20+10, 10**20, -3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
# Now test range() with longs
self.assertEqual(list(range(-2**100)), [])
self.assertEqual(list(range(0, -2**100)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
a = int(10 * sys.maxsize)
b = int(100 * sys.maxsize)
c = int(50 * sys.maxsize)
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], a)
self.assertEqual(seq[-1], a+c)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], b)
self.assertEqual(seq[-1], b-c)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], -a)
self.assertEqual(seq[-1], -a-c)
def test_large_range(self):
# Check long ranges (len > sys.maxsize)
# len() is expected to fail due to limitations of the __len__ protocol
def _range_len(x):
try:
length = len(x)
except OverflowError:
step = x[1] - x[0]
length = 1 + ((x[-1] - x[0]) // step)
return length
a = -sys.maxsize
b = sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = 2 * sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = sys.maxsize**10
c = 2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = sys.maxsize**10
b = 0
c = -2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
def test_invalid_invocation(self):
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
a = int(10 * sys.maxsize)
self.assertRaises(ValueError, range, a, a + 1, int(0))
self.assertRaises(TypeError, range, 1., 1., 1.)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
# Exercise various combinations of bad arguments, to check
# refcounting logic
self.assertRaises(TypeError, range, 0.0)
self.assertRaises(TypeError, range, 0, 0.0)
self.assertRaises(TypeError, range, 0.0, 0)
self.assertRaises(TypeError, range, 0.0, 0.0)
self.assertRaises(TypeError, range, 0, 0, 1.0)
self.assertRaises(TypeError, range, 0, 0.0, 1)
self.assertRaises(TypeError, range, 0, 0.0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0, 1)
self.assertRaises(TypeError, range, 0.0, 0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0.0, 1)
self.assertRaises(TypeError, range, 0.0, 0.0, 1.0)
def test_index(self):
u = range(2)
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = range(-2, 3)
self.assertEqual(u.count(0), 1)
self.assertEqual(u.index(0), 2)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = range(4)
self.assertRaises(BadExc, a.index, BadCmp())
a = range(-2, 3)
self.assertEqual(a.index(0), 2)
self.assertEqual(range(1, 10, 3).index(4), 1)
self.assertEqual(range(1, -10, -3).index(-5), 2)
self.assertEqual(range(10**20).index(1), 1)
self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1)
self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87)
self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86)
class AlwaysEqual(object):
def __eq__(self, other):
return True
always_equal = AlwaysEqual()
self.assertEqual(range(10).index(always_equal), 0)
def test_user_index_method(self):
bignum = 2*sys.maxsize
smallnum = 42
# User-defined class with an __index__ method
class I:
def __init__(self, n):
self.n = int(n)
def __index__(self):
return self.n
self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])
# User-defined class with a failing __index__ method
class IX:
def __index__(self):
raise RuntimeError
self.assertRaises(RuntimeError, range, IX())
# User-defined class with an invalid __index__ method
class IN:
def __index__(self):
return "not a number"
self.assertRaises(TypeError, range, IN())
# Test use of user-defined classes in slice indices.
self.assertEqual(range(10)[:I(5)], range(5))
with self.assertRaises(RuntimeError):
range(0, 10)[:IX()]
with self.assertRaises(TypeError):
range(0, 10)[:IN()]
def test_count(self):
self.assertEqual(range(3).count(-1), 0)
self.assertEqual(range(3).count(0), 1)
self.assertEqual(range(3).count(1), 1)
self.assertEqual(range(3).count(2), 1)
self.assertEqual(range(3).count(3), 0)
self.assertIs(type(range(3).count(-1)), int)
self.assertIs(type(range(3).count(1)), int)
self.assertEqual(range(10**20).count(1), 1)
self.assertEqual(range(10**20).count(10**20), 0)
self.assertEqual(range(3).index(1), 1)
self.assertEqual(range(1, 2**100, 2).count(2**87), 0)
self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1)
class AlwaysEqual(object):
def __eq__(self, other):
return True
always_equal = AlwaysEqual()
self.assertEqual(range(10).count(always_equal), 10)
self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10)
def test_repr(self):
self.assertEqual(repr(range(1)), 'range(0, 1)')
self.assertEqual(repr(range(1, 2)), 'range(1, 2)')
self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)')
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
with self.subTest(proto=proto, test=t):
r = range(*t)
self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
def test_iterator_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
it = itorg = iter(range(*t))
data = list(range(*t))
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
def test_exhausted_iterator_pickling(self):
r = range(2**65, 2**65+2)
i = iter(r)
while True:
r = next(i)
if r == 2**65+1:
break
d = pickle.dumps(i)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_large_exhausted_iterator_pickling(self):
r = range(20)
i = iter(r)
while True:
r = next(i)
if r == 19:
break
d = pickle.dumps(i)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_odd_bug(self):
# This used to raise a "SystemError: NULL result without error"
# because the range validation step was eating the exception
# before NULL was returned.
with self.assertRaises(TypeError):
range([], 1, -1)
def test_types(self):
# Non-integer objects *equal* to any of the range's items are supposed
# to be contained in the range.
self.assertIn(1.0, range(3))
self.assertIn(True, range(3))
self.assertIn(1+0j, range(3))
class C1:
def __eq__(self, other): return True
self.assertIn(C1(), range(3))
# Objects are never coerced into other types for comparison.
class C2:
def __int__(self): return 1
def __index__(self): return 1
self.assertNotIn(C2(), range(3))
# ..except if explicitly told so.
self.assertIn(int(C2()), range(3))
# Check that the range.__contains__ optimization is only
# used for ints, not for instances of subclasses of int.
class C3(int):
def __eq__(self, other): return True
self.assertIn(C3(11), range(10))
self.assertIn(C3(11), list(range(10)))
def test_strided_limits(self):
r = range(0, 101, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertIn(2, r)
self.assertNotIn(99, r)
self.assertIn(100, r)
self.assertNotIn(101, r)
r = range(0, -20, -1)
self.assertIn(0, r)
self.assertIn(-1, r)
self.assertIn(-19, r)
self.assertNotIn(-20, r)
r = range(0, -20, -2)
self.assertIn(-18, r)
self.assertNotIn(-19, r)
self.assertNotIn(-20, r)
def test_empty(self):
r = range(0)
self.assertNotIn(0, r)
self.assertNotIn(1, r)
r = range(0, -10)
self.assertNotIn(0, r)
self.assertNotIn(-1, r)
self.assertNotIn(1, r)
def test_range_iterators(self):
# exercise 'fast' iterators, that use a rangeiterobject internally.
# see issue 7298
limits = [base + jiggle
for M in (2**32, 2**64)
for base in (-M, -M//2, 0, M//2, M)
for jiggle in (-2, -1, 0, 1, 2)]
test_ranges = [(start, end, step)
for start in limits
for end in limits
for step in (-2**63, -2**31, -2, -1, 1, 2)]
for start, end, step in test_ranges:
iter1 = range(start, end, step)
iter2 = pyrange(start, end, step)
test_id = "range({}, {}, {})".format(start, end, step)
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
iter1 = reversed(range(start, end, step))
iter2 = pyrange_reversed(start, end, step)
test_id = "reversed(range({}, {}, {}))".format(start, end, step)
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_slice(self):
def check(start, stop, step=None):
i = slice(start, stop, step)
self.assertEqual(list(r[i]), list(r)[i])
self.assertEqual(len(r[i]), len(list(r)[i]))
for r in [range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize+1, sys.maxsize+10),
]:
check(0, 2)
check(0, 20)
check(1, 2)
check(20, 30)
check(-30, -20)
check(-1, 100, 2)
check(0, -1)
check(-1, -3, -1)
def test_contains(self):
r = range(10)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -1)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(0, 10, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertNotIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -2)
self.assertNotIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
def test_reverse_iteration(self):
for r in [range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize+1, sys.maxsize+10),
]:
self.assertEqual(list(reversed(r)), list(r)[::-1])
def test_issue11845(self):
r = range(*slice(1, 18, 2).indices(20))
values = {None, 0, 1, -1, 2, -2, 5, -5, 19, -19,
20, -20, 21, -21, 30, -30, 99, -99}
for i in values:
for j in values:
for k in values - {0}:
r[i:j:k]
def test_comparison(self):
test_ranges = [range(0), range(0, -1), range(1, 1, 3),
range(1), range(5, 6), range(5, 6, 2),
range(5, 7, 2), range(2), range(0, 4, 2),
range(0, 5, 2), range(0, 6, 2)]
test_tuples = list(map(tuple, test_ranges))
# Check that equality of ranges matches equality of the corresponding
# tuples for each pair from the test lists above.
ranges_eq = [a == b for a in test_ranges for b in test_ranges]
tuples_eq = [a == b for a in test_tuples for b in test_tuples]
self.assertEqual(ranges_eq, tuples_eq)
# Check that != correctly gives the logical negation of ==
ranges_ne = [a != b for a in test_ranges for b in test_ranges]
self.assertEqual(ranges_ne, [not x for x in ranges_eq])
# Equal ranges should have equal hashes.
for a in test_ranges:
for b in test_ranges:
if a == b:
self.assertEqual(hash(a), hash(b))
# Ranges are unequal to other types (even sequence types)
self.assertIs(range(0) == (), False)
self.assertIs(() == range(0), False)
self.assertIs(range(2) == [0, 1], False)
# Huge integers aren't a problem.
self.assertEqual(range(0, 2**100 - 1, 2),
range(0, 2**100, 2))
self.assertEqual(hash(range(0, 2**100 - 1, 2)),
hash(range(0, 2**100, 2)))
self.assertNotEqual(range(0, 2**100, 2),
range(0, 2**100 + 1, 2))
self.assertEqual(range(2**200, 2**201 - 2**99, 2**100),
range(2**200, 2**201, 2**100))
self.assertEqual(hash(range(2**200, 2**201 - 2**99, 2**100)),
hash(range(2**200, 2**201, 2**100)))
self.assertNotEqual(range(2**200, 2**201, 2**100),
range(2**200, 2**201 + 1, 2**100))
# Order comparisons are not implemented for ranges.
with self.assertRaises(TypeError):
range(0) < range(0)
with self.assertRaises(TypeError):
range(0) > range(0)
with self.assertRaises(TypeError):
range(0) <= range(0)
with self.assertRaises(TypeError):
range(0) >= range(0)
def test_attributes(self):
# test the start, stop and step attributes of range objects
self.assert_attrs(range(0), 0, 0, 1)
self.assert_attrs(range(10), 0, 10, 1)
self.assert_attrs(range(-10), 0, -10, 1)
self.assert_attrs(range(0, 10, 1), 0, 10, 1)
self.assert_attrs(range(0, 10, 3), 0, 10, 3)
self.assert_attrs(range(10, 0, -1), 10, 0, -1)
self.assert_attrs(range(10, 0, -3), 10, 0, -3)
def assert_attrs(self, rangeobj, start, stop, step):
self.assertEqual(rangeobj.start, start)
self.assertEqual(rangeobj.stop, stop)
self.assertEqual(rangeobj.step, step)
with self.assertRaises(AttributeError):
rangeobj.start = 0
with self.assertRaises(AttributeError):
rangeobj.stop = 10
with self.assertRaises(AttributeError):
rangeobj.step = 1
with self.assertRaises(AttributeError):
del rangeobj.start
with self.assertRaises(AttributeError):
del rangeobj.stop
with self.assertRaises(AttributeError):
del rangeobj.step
def test_main():
test.support.run_unittest(RangeTest)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
zofuthan/edx-platform
|
cms/djangoapps/course_creators/tests/test_views.py
|
98
|
4497
|
"""
Tests course_creators.views.py.
"""
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.test import TestCase, RequestFactory
from course_creators.views import add_user_with_status_unrequested, add_user_with_status_granted
from course_creators.views import get_course_creator_status, update_course_creator_group, user_requested_access
import mock
from student.roles import CourseCreatorRole
from student import auth
from edxmako.tests import mako_middleware_process_request
class CourseCreatorView(TestCase):
"""
Tests for modifying the course creator table.
"""
def setUp(self):
""" Test case setup """
super(CourseCreatorView, self).setUp()
self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
self.admin.is_staff = True
def test_staff_permission_required(self):
"""
Tests that any method changing the course creator authz group must be called with staff permissions.
"""
with self.assertRaises(PermissionDenied):
add_user_with_status_granted(self.user, self.user)
with self.assertRaises(PermissionDenied):
update_course_creator_group(self.user, self.user, True)
def test_table_initially_empty(self):
self.assertIsNone(get_course_creator_status(self.user))
def test_add_unrequested(self):
add_user_with_status_unrequested(self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
# Calling add again will be a no-op (even if state is different).
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
def test_add_granted(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
# Calling add_user_with_status_granted impacts is_user_in_course_group_role.
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
# Calling add again will be a no-op (even if state is different).
add_user_with_status_unrequested(self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole()))
def test_update_creator_group(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
update_course_creator_group(self.admin, self.user, True)
self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole()))
update_course_creator_group(self.admin, self.user, False)
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
def test_user_requested_access(self):
add_user_with_status_unrequested(self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
request = RequestFactory().get('/')
request.user = self.user
mako_middleware_process_request(request)
user_requested_access(self.user)
self.assertEqual('pending', get_course_creator_status(self.user))
def test_user_requested_already_granted(self):
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
# Will not "downgrade" to pending because that would require removing the
# user from the authz course creator group (and that can only be done by an admin).
user_requested_access(self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
def test_add_user_unrequested_staff(self):
# Users marked as is_staff will not be added to the course creator table.
add_user_with_status_unrequested(self.admin)
self.assertIsNone(get_course_creator_status(self.admin))
def test_add_user_granted_staff(self):
# Users marked as is_staff will not be added to the course creator table.
add_user_with_status_granted(self.admin, self.admin)
self.assertIsNone(get_course_creator_status(self.admin))
|
agpl-3.0
|
mikulucky/java-design-patterns
|
_scripts/postPumlsToServer.py
|
2
|
2860
|
#
# The MIT License
# Copyright (c) 2014 Ilkka SeppΓ€lΓ€
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import requests, glob, re, os
# taken from here: http://stackoverflow.com/a/13641746
def replace(file, pattern, subst):
# Read contents from file as a single string
file_handle = open(file, 'r')
file_string = file_handle.read()
file_handle.close()
# Use RE package to allow for replacement (also allowing for (multiline) REGEX)
file_string = (re.sub(pattern, subst, file_string))
# Write contents to file.
# Using mode 'w' truncates the file.
file_handle = open(file, 'w')
file_handle.write(file_string)
file_handle.close()
# list of all puml files
fileList = glob.glob('*/etc/*.puml')
for puml in fileList:
pathSplit = puml.split("/")
# parent folder
parent = pathSplit[0]
# individual artifact/project name
artifact = pathSplit[2].replace(".urm.puml", "")
print "parent: " + parent + "; artifact: " + artifact
# do a POST to the official plantuml hosting site with a little trick "!includeurl" and raw github content
data = {
'text': "!includeurl https://raw.githubusercontent.com/iluwatar/java-design-patterns/master/" + puml
}
r = requests.post('http://plantuml.com/plantuml/uml', data=data)
pumlId = r.url.replace("http://plantuml.com/plantuml/uml/", "")
# the only thing needed to get a png/svg/ascii from the server back
print "Puml Server ID: " + pumlId
# add the id so jekyll/liquid can use it
if (parent == artifact):
replace("./" + parent + "/README.md", "categories:", "pumlid: {}\\ncategories:".format(pumlId))
else:
print "I dont want to program this, just add the following lines to the README.md file that corresponds to this puml file '" + puml + "'\npumlid: {}".format(pumlId)
|
mit
|
urska19/LVR-sat
|
src/graphColoring.py
|
1
|
1511
|
#!/usr/bin/env python
from logConstructs import *
def graph_coloring(graph, colors):
if len(graph) < colors:
return False
variables=[[None for i in range(colors)] for j in range(len(graph))]
#construct variables
for i in range(len(graph)):
for j in range(colors):
variables[i][j] = Var("X" + str(i) + "" + str(j))
#construct first sub formula - node must be colored
main_formula = And(map(lambda x: Or(x), variables))
#construct second sub formula - node must be colored with one color
subformula = []
for k in range(colors - 1):
for l in range(k + 1, colors):
subformula += map(lambda x: Not(And([x[k], x[l]])), variables)
#construct third sub formula - connected nodes have different colors
for i in range(len(graph) - 1):
for j in range(i + 1, len(graph)):
if graph[i][j] == 1:
subformula += map(lambda x: Not(And([variables[i][x], variables[j][x]])), range(colors))
main_formula = And(subformula + main_formula.clause)
return main_formula.simplify()
def printGraph(graph):
result = ""
for i in range(len(graph)):
for j in range(len(graph)):
result += " " + str(graph[i][j]) + " "
result += "\n"
return result
def processResult(result):
mappings = {}
for key in result:
node = key[1]
color = key[2]
if result[key]:
mappings[int(node)] = int(color)
return mappings
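# --- Illustrative usage sketch, not part of the original module ---
# Encoding 3-coloring of a triangle; the adjacency matrix and the satisfying
# assignment mentioned in the comment are made up for illustration.
# graph_coloring() only builds the formula: obtaining `result` requires one of
# this project's SAT solvers, which lives outside this file.
if __name__ == "__main__":
    triangle = [[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]]
    formula = graph_coloring(triangle, 3)
    print printGraph(triangle)
    print formula
    # A solver returning e.g. result = {"X00": True, "X01": False, ...} can be
    # post-processed with processResult(result) to map node index -> color index.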
|
bsd-3-clause
|
pylixm/sae-django-demo
|
django1.7-sae/site-packages/django/contrib/admin/helpers.py
|
13
|
14132
|
from __future__ import unicode_literals
from django import forms
from django.contrib.admin.utils import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils.encoding import force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(field in self.form.fields and
self.form.fields[field].widget.is_hidden
for field in self.fields)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe('\n'.join(self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = {'class': ' '.join(classes)} if classes else {}
# checkboxes should not have a label suffix as the checkbox appears
# to the left of the label.
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs,
label_suffix='' if self.is_checkbox else None)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ if field.__name__ != '<lambda>' else ''
else:
class_name = field
if form._meta.labels and class_name in form._meta.labels:
label = form._meta.labels[class_name]
else:
label = label_for_field(field, form._meta.model, model_admin)
if form._meta.help_texts and class_name in form._meta.help_texts:
help_text = form._meta.help_texts[class_name]
else:
help_text = help_text_for_field(class_name, form._meta.model)
self.field = {
'name': class_name,
'label': label,
'help_text': help_text,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{0}>{1}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
result_repr = linebreaksbr(result_repr, autoescape=True)
else:
if isinstance(f.rel, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts, view_on_site_url=view_on_site_url)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if field_name in self.readonly_fields:
yield {
'label': label_for_field(field_name, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False,
'help_text': help_text_for_field(field_name, self.opts.model),
}
else:
yield self.formset.form.base_fields[field_name]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None, view_on_site_url=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
if original is not None:
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
self.original_content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and view_on_site_url is not None
self.absolute_url = view_on_site_url
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def needs_explicit_pk_field(self):
# Auto fields are editable (oddly), so need to check for auto or non-editable pk
if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
return True
# Also search any parents for an auto field. (The pk info is propagated to child
# models so that does not need to be checked in parents.)
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 0
        # has_auto_field() is not defined on this class; needs_explicit_pk_field()
        # performs the equivalent check for an auto or non-editable primary key.
        if self.needs_explicit_pk_field():
num_of_fields += 1
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
super(AdminErrorList, self).__init__()
if form.is_bound:
self.extend(list(six.itervalues(form.errors)))
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(list(six.itervalues(errors_in_inline_form)))
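# A minimal usage sketch of the helpers above, assuming a configured Django
# project; ``my_form`` is a hypothetical bound ModelForm with ``name`` and
# ``email`` fields, and the fieldsets tuple is illustrative. Not part of the
# upstream module and never called at import time.
def _admin_form_walkthrough(my_form):
    admin_form = AdminForm(my_form,
                           [(None, {'fields': ['name', 'email']})],
                           {})
    labels = []
    for fieldset in admin_form:        # Fieldset
        for line in fieldset:          # Fieldline
            for field in line:         # AdminField or AdminReadonlyField
                labels.append(field.label_tag())
    return labels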
|
apache-2.0
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/cluster/dbscan_.py
|
18
|
12859
|
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, this adds no information).
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = X.indices.astype(np.intp, copy=False)[X_mask]
masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
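# A minimal usage sketch of the functional ``dbscan`` API above; the toy data
# and parameter values are illustrative only, and the helper is not called at
# import time.
def _dbscan_function_example():
    X_demo = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9],
                       [8.0, 8.0], [8.1, 8.2],
                       [25.0, 80.0]])
    core_samples, labels = dbscan(X_demo, eps=0.5, min_samples=2)
    # ``labels`` assigns -1 to noise (the last sample here) and consecutive
    # integers to the two dense clusters; ``core_samples`` lists core indices.
    return core_samples, labels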
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
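# The estimator counterpart of the sketch above: fit() populates
# ``core_sample_indices_``, ``components_`` and ``labels_`` as documented in
# the class docstring. Toy data only; not part of the upstream module.
def _dbscan_estimator_example():
    X_demo = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9],
                       [8.0, 8.0], [8.1, 8.2],
                       [25.0, 80.0]])
    model = DBSCAN(eps=0.5, min_samples=2).fit(X_demo)
    return model.labels_, model.core_sample_indices_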
|
mit
|
BizShuk/env_setup
|
pkg/sublime/MarkdownEditing/insert_references.py
|
2
|
5385
|
# -*- coding: UTF-8 -*-
import sublime
import sublime_plugin
import re
def get_clipboard_if_url():
# If the clipboard contains an URL, return it
# Otherwise, return an empty string
    re_match_urls = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+(?:(([^\s()<>]+|(([^\s()<>]+)))*)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))""", re.DOTALL)
m = re_match_urls.search(sublime.get_clipboard())
return m.group() if m else ''
def mangle_url(url):
url = url.strip()
if re.match(r'^([a-z0-9-]+\.)+\w{2,4}', url, re.IGNORECASE):
url = 'http://' + url
return url
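# ``mangle_url`` is a pure helper, so its behaviour can be sketched without the
# Sublime Text API: bare domains get an http:// prefix, explicit schemes are
# left untouched. Illustrative values; not part of the upstream plugin.
def _mangle_url_examples():
    assert mangle_url(' example.com/page ') == 'http://example.com/page'
    assert mangle_url('https://example.com/page') == 'https://example.com/page'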
def check_for_link(view, url):
titles = []
# Check if URL is already present as reference link
view.find_all(r'^\s{0,3}\[([^^\]]+)\]:[ \t]+' + re.escape(url) + '$', 0, '$1', titles)
return titles[0] if titles else None
def append_reference_link(edit, view, title, url):
# Detect if file ends with \n
if view.substr(view.size() - 1) == '\n':
nl = ''
else:
nl = '\n'
# Append the new reference link to the end of the file
view.insert(edit, view.size(), '{0}[{1}]: {2}\n'.format(nl, title, url))
def insert_references(edit, view, title):
# Add a reference with given title at the current cursor or around the current selection(s)
sels = view.sel()
caret = []
for sel in sels:
text = view.substr(sel)
# If something is selected...
if len(text) > 0:
# ... turn the selected text into the link text
view.replace(edit, sel, "[{0}][{1}]".format(text, title))
else:
# Add the link, with empty link text, and the caret positioned
# ready to type the link text
view.replace(edit, sel, "[][{0}]".format(title))
caret += [sublime.Region(sel.begin() + 1, sel.begin() + 1)]
if len(caret) > 0:
sels.clear()
for c in caret:
sels.add(c)
# Inspired by http://www.leancrew.com/all-this/2012/08/markdown-reference-links-in-bbedit/
# Appends a new reference link to end of document, using a user-input name and URL.
# Then inserts a reference to the link at the current selection(s).
class InsertNamedReferenceCommand(sublime_plugin.TextCommand):
def description(self):
        return 'Insert Named Reference Link'
def run(self, edit):
self.view.window().show_input_panel(
'URL to link to:',
get_clipboard_if_url(),
self.receive_link,
None, None)
def receive_link(self, linkurl):
linkurl = mangle_url(linkurl)
newref = check_for_link(self.view, linkurl)
if newref:
# Link already exists, reuse existing reference
self.insert_link(linkurl, newref, False)
else:
self.view.window().show_input_panel(
'Name for reference:', '',
lambda newref: self.insert_link(linkurl, newref),
None, None)
def insert_link(self, linkurl, newref, actually_insert=True):
# Check if title is already present as reference
if actually_insert and self.view.find(r'^\s{0,3}\[' + re.escape(newref) + '\]:[ \t]+', 0):
sublime.error_message('A reference named "' + newref + '" already exists.')
self.view.window().show_input_panel(
'Name for reference:', '',
lambda newref: self.insert_link(linkurl, newref),
None, None)
return
edit = self.view.begin_edit()
try:
if actually_insert:
append_reference_link(edit, self.view, newref, linkurl)
insert_references(edit, self.view, newref)
finally:
self.view.end_edit(edit)
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, "text.html.markdown"))
# Inspired by http://www.leancrew.com/all-this/2012/08/markdown-reference-links-in-bbedit/
# Appends a new reference link to end of document, using an autoincrementing number as the reference title.
# Then inserts a reference to the link at the current selection(s).
class InsertNumberedReferenceCommand(sublime_plugin.TextCommand):
def description(self):
return 'Insert Numbered Reference Link'
def run(self, edit):
self.view.window().show_input_panel(
'URL to link to:',
get_clipboard_if_url(),
self.insert_link,
None, None)
def insert_link(self, linkurl):
edit = self.view.begin_edit()
try:
linkurl = mangle_url(linkurl)
newref = check_for_link(self.view, linkurl)
if not newref:
# Find the next reference number
reflinks = self.view.find_all(r'(?<=^\[)(\d+)(?=\]: )')
if len(reflinks) == 0:
newref = 1
else:
newref = max(int(self.view.substr(reg)) for reg in reflinks) + 1
append_reference_link(edit, self.view, newref, linkurl)
insert_references(edit, self.view, newref)
finally:
self.view.end_edit(edit)
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, "text.html.markdown"))
|
gpl-3.0
|
dschien/PyExcelModelingHelper
|
excel_helper/__init__.py
|
1
|
33092
|
import csv
import datetime
import importlib
import sys
from abc import abstractmethod
from collections import defaultdict
from typing import Dict, List, Set
import numpy as np
import pandas as pd
from dateutil import relativedelta as rdelta
import logging
from functools import partial
from xlrd import xldate_as_tuple
import calendar
from scipy.interpolate import interp1d
import json
__author__ = 'schien'
import pkg_resources # part of setuptools
version = pkg_resources.require("excel-modelling-helper")[0].version
param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name',
'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b',
'param 3': 'param_c',
'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '',
'source': ''}
param_name_map_v2 = {'CAGR': 'cagr',
'comment': '',
'label': '',
'mean growth': 'growth_factor',
'param': '',
'ref date': 'ref_date',
'ref value': '',
'scenario': 'source_scenarios_string',
'source': '',
'tags': '',
'type': '',
'unit': '',
'variability growth': 'ef_growth_factor',
'initial_value_proportional_variation': '',
'variable': 'name'}
param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2}
# logger.basicConfig(level=logger.DEBUG)
logger = logging.getLogger(__name__)
class DistributionFunctionGenerator(object):
module: str
distribution: str
param_a: str
param_b: str
param_c: str
def __init__(self, module_name=None, distribution_name=None, param_a: float = None,
param_b: float = None, param_c: float = None, size=None, **kwargs):
"""
Instantiate a new object.
:param module_name:
:param distribution_name:
:param param_a:
:param param_b:
:param param_c:
:param size:
:param kwargs: can contain key "sample_mean_value" with bool value
"""
self.kwargs = kwargs
self.size = size
self.module_name = module_name
self.distribution_name = distribution_name
self.sample_mean_value = kwargs.get('sample_mean_value', False)
# prepare function arguments
if distribution_name == 'choice':
if type(param_a) == str:
tokens = param_a.split(',')
params = [float(token.strip()) for token in tokens]
                self.random_function_params = [np.array(params, dtype=float)]
            else:
                self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=float)]
logger.debug(f'setting function params for choice distribution {self.random_function_params}')
else:
self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]]
def get_mean(self, distribution_function):
"""Get the mean value for a distribution.
        If the distribution is one of [normal, uniform, choice, triangular], the analytic value is calculated.
        Otherwise, the distribution is instantiated and the mean of a drawn sample is calculated.
:param distribution_function:
:return: the mean as a scalar
"""
name = self.distribution_name
params = self.random_function_params
if name == 'normal':
return params[0]
if name == 'uniform':
return (params[0] + params[1]) / 2.
if name == 'choice':
return params[0].mean()
if name == 'triangular':
return (params[0] + params[1] + params[2]) / 3.
return distribution_function().mean()
def generate_values(self, *args, **kwargs):
"""
        Generate a sample of values by sampling from a distribution. The size of the sample can be overridden with the 'size' kwarg.
If `self.sample_mean_value == True` the sample will contain "size" times the mean value.
:param args:
:param kwargs:
:return: sample as vector of given size
"""
sample_size = kwargs.get('size', self.size)
f = self.instantiate_distribution_function(self.module_name, self.distribution_name)
distribution_function = partial(f, *self.random_function_params, size=sample_size)
if self.sample_mean_value:
sample = np.full(sample_size, self.get_mean(distribution_function))
else:
sample = distribution_function()
return sample
@staticmethod
def instantiate_distribution_function(module_name, distribution_name):
module = importlib.import_module(module_name)
func = getattr(module, distribution_name)
return func
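# A minimal sampling sketch for DistributionFunctionGenerator above; the module
# and distribution names resolve to numpy.random.normal, and the parameter
# values are illustrative only. Not part of the upstream module.
def _distribution_generator_example():
    gen = DistributionFunctionGenerator(module_name='numpy.random',
                                        distribution_name='normal',
                                        param_a=5.0, param_b=2.0, size=10)
    return gen.generate_values()  # ndarray of 10 draws from N(5, 2)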
class Parameter(object):
"""
A single parameter
"""
version: int
name: str
unit: str
comment: str
source: str
scenario: str
processes: Dict[str, List]
"optional comma-separated list of tags"
tags: str
def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None,
comment: str = None, source: str = None, version=None,
**kwargs):
# The source definition of scenarios. A comma-separated list
self.version = version
self.source = source
self.comment = comment
self.unit = unit
self.source_scenarios_string = source_scenarios_string
self.tags = tags
self.name = name
self.scenario = None
self.cache = None
# track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter
self.processes = defaultdict(list)
self.kwargs = kwargs
def __call__(self, settings=None, *args, **kwargs):
"""
Samples from a parameter. Values are cached and returns the same value every time called.
@todo confusing interface that accepts 'settings' and kwargs at the same time.
worse- 'use_time_series' must be present in the settings dict
:param args:
:param kwargs:
:return:
"""
if self.cache is None:
kwargs['name'] = self.name
kwargs['unit'] = self.unit
kwargs['tags'] = self.tags
kwargs['scenario'] = self.scenario
if not settings:
settings = {}
common_args = {'size': settings.get('sample_size', 1),
'sample_mean_value': settings.get('sample_mean_value', False)}
common_args.update(**self.kwargs)
if settings.get('use_time_series', False):
if self.version == 2:
generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times'])
else:
generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args,
times=settings['times'])
else:
generator = DistributionFunctionGenerator(**common_args)
self.cache = generator.generate_values(*args, **kwargs)
return self.cache
def add_usage(self, process_name, variable_name):
# add the name of a variable of a process model that is backed by this parameter
self.processes[process_name].append(variable_name)
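# A minimal sketch of building a Parameter directly in code rather than from a
# spreadsheet; the extra keyword arguments are forwarded to
# DistributionFunctionGenerator when the parameter is sampled. Names and values
# are illustrative only; not part of the upstream module.
def _parameter_example():
    power = Parameter('Power_TV', unit='W', tags='UD, TV',
                      module_name='numpy.random', distribution_name='normal',
                      param_a=60.0, param_b=5.0)
    sample = power({'sample_size': 8, 'use_time_series': False})
    return sample  # cached: repeated calls return these same 8 draws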
class GrowthTimeSeriesGenerator(DistributionFunctionGenerator):
ref_date: str
# of the mean values
# the type of growth ['exp']
# growth_function_type: str
# of the error function
variance: str
# error function growth rate
ef_growth_factor: str
def __init__(self, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
assert 'ref value' in self.kwargs
# 1. Generate $\mu$
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
ref_date = self.ref_date
if not ref_date:
raise Exception(f"Ref date not set for variable {kwargs['name']}")
mu = self.generate_mu(end_date, ref_date, start_date)
# 3. Generate $\sigma$
## Prepare array with growth values $\sigma$
if self.sample_mean_value:
sigma = np.zeros((len(self.times), self.size))
else:
if self.kwargs['type'] == 'interp':
def get_date(record):
return datetime.datetime.strptime(record[0], "%Y-%m-%d")
ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date)
                initial_value = ref_value_[0][1]
            else:
                initial_value = float(self.kwargs['ref value'])
            variability_ = initial_value * self.kwargs['initial_value_proportional_variation']
logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}')
sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size))
# logger.debug(ref_date.strftime("%b %d %Y"))
## 4. Prepare growth array for $\alpha_{sigma}$
alpha_sigma = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['ef_growth_factor'], 1)
### 5. Prepare DataFrame
iterables = [self.times, range(self.size)]
index_names = ['time', 'samples']
_multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
# logger.debug(start_date)
# logger.debug(end_date)
from dateutil import relativedelta
r = relativedelta.relativedelta(end_date, start_date)
months = r.years * 12 + r.months + 1
name = kwargs['name']
## Apply growth to $\sigma$ and add $\sigma$ to $\mu$
# logger.debug(sigma.size)
# logger.debug(alpha_sigma.shape)
# logger.debug(months)
unit_ = kwargs["unit"]
if not unit_:
unit_ = 'dimensionless'
series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index,
dtype=f'pint[{unit_}]')
## test if df has sub-zero values
df_sigma__dropna = series.where(series < 0).dropna()
if not df_sigma__dropna.pint.m.empty:
logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}")
return series
def generate_mu(self, end_date, ref_date, start_date):
if self.kwargs['type'] == 'exp':
mu_bar = np.full(len(self.times), float(self.kwargs['ref value']))
# 2. Apply Growth to Mean Values $\alpha_{mu}$
alpha_mu = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['growth_factor'], 1)
mu = mu_bar * alpha_mu.ravel()
mu = mu.reshape(len(self.times), 1)
return mu
if self.kwargs['type'] == 'interp':
def toTimestamp(d):
return calendar.timegm(d.timetuple())
def interpolate(growth_config: Dict[str, float], date_range, kind='linear'):
arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in
growth_config.keys()])
arr2 = np.array([val for val in growth_config.values()])
f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate')
return f([toTimestamp(date_val) for date_val in date_range])
ref_value_ = json.loads(self.kwargs['ref value'].strip())
return interpolate(ref_value_, self.times, self.kwargs['param'])
class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator):
cagr: str
ref_date: str
def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cagr = cagr if cagr else 0
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,))
alpha = self.cagr
# @todo - fill to cover the entire time: define rules for filling first
ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime()
# assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.'
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.'
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size)
values *= a.ravel()
# df = pd.DataFrame(values)
# df.columns = [kwargs['name']]
# df.set_index(self._multi_index, inplace=True)
# # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series
# data_series = df.iloc[:, 0]
# data_series._metadata = kwargs
# data_series.index.rename(['time', 'samples'], inplace=True)
#
if not kwargs["unit"]:
series = pd.Series(values, index=self._multi_index, dtype='pint[dimensionless]')
else:
series = pd.Series(values, index=self._multi_index, dtype=f'pint[{kwargs["unit"]}]')
return series
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
"""
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0).
a growth rate alpha
t0 start date
t' end date
y' output
y0 start value
"""
start_offset = 0
if ref_date < start_date:
offset_delta = rdelta.relativedelta(start_date, ref_date)
start_offset = offset_delta.months + 12 * offset_delta.years
start_date = ref_date
end_offset = 0
if ref_date > end_date:
offset_delta = rdelta.relativedelta(ref_date, end_date)
end_offset = offset_delta.months + 12 * offset_delta.years
end_date = ref_date
delta_ar = rdelta.relativedelta(ref_date, start_date)
ar = delta_ar.months + 12 * delta_ar.years
delta_br = rdelta.relativedelta(end_date, ref_date)
br = delta_br.months + 12 * delta_br.years
# we place the ref point on the lower interval (delta_ar + 1) but let it start from 0
# in turn we let the upper interval start from 1
g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float)
h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float)
g = np.flipud(g)
# now join the two arrays
a = np.vstack((g, h))
if start_offset > 0:
a = a[start_offset:]
if end_offset > 0:
a = a[:-end_offset]
return a
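# A minimal sketch of growth_coefficients above: monthly factors for one year
# at a 5% annual rate with the reference date equal to the start date. Dates
# and rate are illustrative only; not part of the upstream module.
def _growth_coefficients_example():
    start = datetime.datetime(2020, 1, 1)
    end = datetime.datetime(2020, 12, 1)
    coeffs = growth_coefficients(start, end, ref_date=start, alpha=0.05, samples=1)
    # coeffs has shape (12, 1): 1.0 for the reference month, then (1.05)**(m/12)
    # for the m-th following month.
    return coeffs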
class ParameterScenarioSet(object):
"""
The set of all version of a parameter for all the scenarios.
"""
default_scenario = 'default'
"the name of the parameters in this set"
parameter_name: str
scenarios: Dict[str, Parameter]
def __init__(self):
self.scenarios = {}
def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario):
"""
Add a scenario for this parameter.
:param scenario_name:
:param parameter:
:return:
"""
self.scenarios[scenario_name] = parameter
def __getitem__(self, item):
return self.scenarios.__getitem__(item)
def __setitem__(self, key, value):
return self.scenarios.__setitem__(key, value)
class ParameterRepository(object):
"""
Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses).
    The param definitions are independent from the sampling (the Parameter.__call__ method). Samples are cached on the
    parameter, so repeated calls return the same values until ParameterRepository.clear_cache() resets them.
Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet.
"""
parameter_sets: Dict[str, ParameterScenarioSet]
tags: Dict[str, Dict[str, Set[Parameter]]]
def __init__(self):
self.parameter_sets = defaultdict(ParameterScenarioSet)
self.tags = defaultdict(lambda: defaultdict(set))
def add_all(self, parameters: List[Parameter]):
for p in parameters:
self.add_parameter(p)
def clear_cache(self):
for p_sets in self.parameter_sets.values():
for param_name, param in p_sets.scenarios.items():
param.cache = None
def add_parameter(self, parameter: Parameter):
"""
A parameter can have several scenarios. They are specified as a comma-separated list in a string.
:param parameter:
:return:
"""
# try reading the scenarios from the function arg or from the parameter attribute
scenario_string = parameter.source_scenarios_string
if scenario_string:
_scenarios = [i.strip() for i in scenario_string.split(',')]
self.fill_missing_attributes_from_default_parameter(parameter)
else:
_scenarios = [ParameterScenarioSet.default_scenario]
for scenario in _scenarios:
parameter.scenario = scenario
self.parameter_sets[parameter.name][scenario] = parameter
# record all tags for this parameter
if parameter.tags:
_tags = [i.strip() for i in parameter.tags.split(',')]
for tag in _tags:
self.tags[tag][parameter.name].add(parameter)
def fill_missing_attributes_from_default_parameter(self, param):
"""
Empty fields in Parameter definitions in scenarios are populated with default values.
E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar.
| name | scenario | val | tags | source |
|----------|----------|-----|--------|------------|
| Power_TV | | 60 | UD, TV | EnergyStar |
| Power_TV | 8K | 85 | new_tag| |
**Note** tags must not differ. In the example above, the 8K scenario variable the tags value would be overwritten
with the default value.
:param param:
:return:
"""
if not self.exists(param.name) or not ParameterScenarioSet.default_scenario in self.parameter_sets[
param.name].scenarios.keys():
logger.warning(
f'No default value for param {param.name} found.')
return
default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario]
for att_name, att_value in default.__dict__.items():
if att_name in ['unit', 'label', 'comment', 'source', 'tags']:
if att_name == 'tags' and default.tags != param.tags:
logger.warning(
f'For param {param.name} for scenarios {param.source_scenarios_string}, tags is different from default parameter tags. Overwriting with default values.')
setattr(param, att_name, att_value)
if not getattr(param, att_name):
logger.debug(
f'For param {param.name} for scenarios {param.source_scenarios_string}, populating attribute {att_name} with value {att_value} from default parameter.')
setattr(param, att_name, att_value)
def __getitem__(self, item) -> Parameter:
"""
Return the default scenario parameter for a given variable name
:param item: the name of the variable
:return:
"""
return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario)
def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter:
if self.exists(param_name, scenario=scenario_name):
return self.parameter_sets[param_name][scenario_name]
try:
return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario]
except KeyError:
raise KeyError(f"{param_name} not found")
def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]:
"""
Get all registered dicts that are registered for a tag
:param tag: str - single tag
:return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for
all parameter names with a given tag
"""
return self.tags[tag]
def exists(self, param, scenario=None) -> bool:
# if scenario is not None:
# return
present = param in self.parameter_sets.keys()
if not present:
return False
scenario = scenario if scenario else ParameterScenarioSet.default_scenario
return scenario in self.parameter_sets[param].scenarios.keys()
def list_scenarios(self, param):
if param in self.parameter_sets.keys():
return self.parameter_sets[param].scenarios.keys()
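# A minimal sketch of ParameterRepository with a default and an '8K' scenario
# variant of the same parameter; names, tags and values are illustrative only
# and not part of the upstream module.
def _repository_example():
    repo = ParameterRepository()
    default = Parameter('Power_TV', tags='UD, TV',
                        module_name='numpy.random', distribution_name='normal',
                        param_a=60.0, param_b=5.0)
    eight_k = Parameter('Power_TV', source_scenarios_string='8K',
                        module_name='numpy.random', distribution_name='normal',
                        param_a=85.0, param_b=5.0)
    repo.add_all([default, eight_k])
    assert repo.exists('Power_TV', scenario='8K')
    return repo.get_parameter('Power_TV', scenario_name='8K')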
class ExcelHandler(object):
version: int
def __init__(self):
self.version = 1
@abstractmethod
def load_definitions(self, sheet_name, filename=None):
raise NotImplementedError()
class OpenpyxlExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
definitions = []
from openpyxl import load_workbook
wb = load_workbook(filename=filename, data_only=True)
_sheet_names = [sheet_name] if sheet_name else wb.sheetnames
for _sheet_name in _sheet_names:
            sheet = wb[_sheet_name]  # get_sheet_by_name() is deprecated in openpyxl
rows = list(sheet.rows)
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class Xlsx2CsvHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
from xlsx2csv import Xlsx2csv
data = Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0)
definitions = []
        _sheet_names = [sheet_name] if sheet_name else list(data.keys())
for _sheet_name in _sheet_names:
sheet = data[_sheet_name]
header = sheet.header
if header[0] != 'variable':
continue
for row in sheet.rows:
values = {}
for key, cell in zip(header, row):
values[key] = cell
definitions.append(values)
return definitions
class CSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
return csv.DictReader(open(filename), delimiter=',')
class PandasCSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
self.version = 2
import pandas as pd
df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'],
dtype={'initial_value_proportional_variation': 'float64'},
dayfirst=True
# date_parser=lambda x: pd.datetime.strptime(x, '%d-%m-%Y')
)
df = df.dropna(subset=['variable', 'ref value'])
df.fillna("", inplace=True)
return df.to_dict(orient='records')
class XLRDExcelHandler(ExcelHandler):
version: int
@staticmethod
def get_sheet_range_bounds(filename, sheet_name):
import xlrd
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name(sheet_name)
rows = list(sheet.get_rows())
return len(rows)
def load_definitions(self, sheet_name, filename=None):
import xlrd
wb = xlrd.open_workbook(filename)
sh = None
definitions = []
_definition_tracking = defaultdict(dict)
_sheet_names = [sheet_name] if sheet_name else [sh.name for sh in wb.sheets()]
version = 1
try:
sheet = wb.sheet_by_name('metadata')
rows = list(sheet.get_rows())
for row in rows:
if row[0].value == 'version':
version = row[1].value
self.version = version
        except Exception:
            logger.info('could not find a sheet with name "metadata" in workbook, defaulting to version 1')
for _sheet_name in _sheet_names:
if _sheet_name == 'metadata':
continue
sheet = wb.sheet_by_name(_sheet_name)
rows = list(sheet.get_rows())
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for i, row in enumerate(rows[1:]):
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
if not values['variable']:
# logger.debug(f'ignoring row {i}: {row}')
continue
if 'ref date' in values and values['ref date']:
if isinstance(values['ref date'], float):
values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode))
if values['ref date'].day != 1:
logger.warning(f'ref date truncated to first of month for variable {values["variable"]}')
values['ref date'] = values['ref date'].replace(day=1)
else:
raise Exception(
f"{values['ref date']} for variable {values['variable']} is not a date - "
f"check spreadsheet value is a valid day of a month")
logger.debug(f'values for {values["variable"]}: {values}')
definitions.append(values)
scenario = values['scenario'] if values['scenario'] else "n/a"
if scenario in _definition_tracking[values['variable']]:
logger.error(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
raise ValueError(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
else:
_definition_tracking[values['variable']][scenario] = 1
return definitions
class XLWingsExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
import xlwings as xw
definitions = []
wb = xw.Book(fullname=filename)
_sheet_names = [sheet_name] if sheet_name else wb.sheets
for _sheet_name in _sheet_names:
sheet = wb.sheets[_sheet_name]
range = sheet.range('A1').expand()
rows = range.rows
header = [cell.value for cell in rows[0]]
# check if this sheet contains parameters or if it documentation
if header[0] != 'variable':
continue
total_rows = XLRDExcelHandler.get_sheet_range_bounds(filename, _sheet_name)
range = sheet.range((1, 1), (total_rows, len(header)))
rows = range.rows
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class ExcelParameterLoader(object):
definition_version: int
"""Utility to populate ParameterRepository from spreadsheets.
The structure of the spreadsheets is:
| variable | ... |
|----------|-----|
| ... | ... |
If the first row in a spreadsheet does not contain they keyword 'variable' the sheet is ignored.
"""
def __init__(self, filename, excel_handler='xlrd', **kwargs):
self.filename = filename
self.definition_version = 2
logger.info(f'Using {excel_handler} excel handler')
excel_handler_instance = None
if excel_handler == 'csv':
excel_handler_instance = CSVHandler()
if excel_handler == 'pandas':
excel_handler_instance = PandasCSVHandler()
if excel_handler == 'openpyxl':
excel_handler_instance = OpenpyxlExcelHandler()
if excel_handler == 'xlsx2csv':
excel_handler_instance = Xlsx2CsvHandler()
if excel_handler == 'xlwings':
excel_handler_instance = XLWingsExcelHandler()
if excel_handler == 'xlrd':
excel_handler_instance = XLRDExcelHandler()
self.excel_handler: ExcelHandler = excel_handler_instance
def load_parameter_definitions(self, sheet_name: str = None):
"""
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
values associated with each header will be None. For example, given the speadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
"""
definitions = self.excel_handler.load_definitions(sheet_name, filename=self.filename)
self.definition_version = self.excel_handler.version
return definitions
def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None):
"""
Create a Repo from an excel file.
:param repository: the repository to load into
:param sheet_name:
:return:
"""
repository.add_all(self.load_parameters(sheet_name))
def load_parameters(self, sheet_name):
parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name)
params = []
param_name_map = param_name_maps[int(self.definition_version)]
for _def in parameter_definitions:
# substitute names from the headers with the kwargs names in the Parameter and Distributions classes
# e.g. 'variable' -> 'name', 'module' -> 'module_name', etc
parameter_kwargs_def = {}
for k, v in _def.items():
if k in param_name_map:
if param_name_map[k]:
parameter_kwargs_def[param_name_map[k]] = v
else:
parameter_kwargs_def[k] = v
name_ = parameter_kwargs_def['name']
del parameter_kwargs_def['name']
p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def)
params.append(p)
return params
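# End-to-end wiring of loader and repository, assuming a spreadsheet named
# 'params.xlsx' with a 'params' sheet and a 'Power_TV' variable that follow the
# structure documented on ExcelParameterLoader; these names are hypothetical
# placeholders and the sketch is not part of the upstream module.
def _loader_example():
    repo = ParameterRepository()
    loader = ExcelParameterLoader('params.xlsx', excel_handler='xlrd')
    loader.load_into_repo(repository=repo, sheet_name='params')
    power = repo['Power_TV']      # default-scenario Parameter
    return power({'sample_size': 4, 'use_time_series': False})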
|
mit
|